[Binary tar archive — contents not human-readable. Recoverable member paths: var/home/core/zuul-output/, var/home/core/zuul-output/logs/, var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log, owner core:core). The compressed payload cannot be reconstructed as text.]
U0J$a\ @EX Q_IB Ya]r\8pl طӰwl-0 0N'B`X T( q#>j ޼Yj‚e~ 2#aQ`=+NF1sJJFڌRC>'(A >@$T5%i\4'p27n\U8/pzk7/5s;JUxH\ fa ;5apJf-th1xR_ 4 a7%xim녵9m:ِCL|U.uE]v)"?2 nVD>E "Cd Ē]/ 0p9 Vi56A8 FT(<]X<070&@5?R0a-sqhtR W6xި\]qM"]m̚?^H˦irh@gmp  Xv&J)1^"Hp91 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@/ $SbI>&0n=lj @ÂIL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&aO}0ފg?1{ݦחYB/ߩ<ݟUNq tK 'C\ifq \1SD\z ĥ;SܕSYdn\iPʕ% rWZ8Bi??G˳oL9Q)/G2 ~f50,U>[,Ujyi8W=[{}nlbZgՔXl Xo"M0H OS=n>?NG>\&V-|2Rx*܃53X Bł,İ^tf liQѻvH?yQeOVΑM ihmvyӯZ0!mdtFX.!>vwdP= ^w`{}ev g}W@ ;,%zL?XX6ߞ҃efd૽~B޶$K¢ECTl,w!C\;}}9<8r`(p8- ݔwg)OsUY^II|ᥑ68a$T̪zKg~R<{ov88_+hޯ / L@kE3ߜM>//U ]g=mlr~Wޟ}7h ÿj} OGk3$}v%Ǘw{|8M./%]ުTQ:Kyf1I=v&3G~h46]՝jH\+ve6>8(tp'EQ4M5%~BaTe'||@,׃{n+yo0toٺ_+zɮ^:S$TES adawIpt&sD*efBΖnh ¬3s2+2xsغHs}LB:KkIԶD.8`iV0#)F$L3qƈ࡛[Т-I 2ώ~3ͼ3?Y7WMZھ⎖38v~qc۪N.~#B g;.N(iVdQ\O%i5ؓQJ'(i&m{JR[W.yƻ$QlD=sl"4zz5>o7]Nnxc^f8f˩(L8u1ss`bfbK͘tQýyx/բq ]i;0A?n?>^^͟rxCZo9 Wvy_$uߢ{䠊F,Am"WM&}Mxf>( Sp` &{c CΰjX9;V Xڬ8Q.8Φ8S`L%c"ԒNRK1G=fC~ Lɧ*v,CeDm3{$˜`lRs,ˍR92Ō rUKY"r<8}&׳< :P^TU5UW&D+ j.3\w)&gOnx/u4(Io~xddҫ!]zM-q| U1̇ދaD۳˰͊j;mB X۷yC-_hmf_o[m =ʭoFrhR=[4ZT⺩Ԥ6i.Cۮ KE:(8m@W&{UÛ:]_?޴|>]{\T:VgMv=m?cap>WA?}\nk<}}1WLfڃM4mM @}-l9 ~/X><9lm 6k g^YaFkp\3z[ ͢'[[vj9}Ug滼;F1mݼݥi oiE¥iC~93'}!dL?ЂUk>Lئ~7iv=]ݕi9Mtr&J}L؛qTӻN# O )!J+?Ţׇ W7=ߓl ᢸhh=3\K`Ŵb!ĘrRdYѕĴzkeu*T+;+ᜧ ,1XILXV0[; i3\re2Qk=&%Q5*e1H:gG;k`f=-̉g]L@,`*T`,Pni[Q;.]vHMj5M㜙UNѐ$N'n1ӖNɉ(2h0.VR^B|mE9E`*۬!gғkk;ڤƣq-׶@ȃZcInsgЀbpM2ET&ugNJtLD!HsZ_.GMY͔+% ;4MYAE}pt9\%xx=-\^zҧ>U.,sQ78k0W$^BfHA r_.=zi!-!-Rh EUk 9rQ:G5e|9 `@Avo W;=eyNA׮ hl]A3/}}f~(m>/X j3ۛL/P͇\ c_ިdefcs5/E=~'?=~ձeէ2O+ P!sUv SJ*)0+>T l5s0%Zxr~ fk3~;# 6%M c9I)5 h1 myϝF]Bd3cx)n8Ȓ::'c\WReG,h%UCj8]` KHWhp $ f hqJvZ;+Isv4otRYbHrR$ШA ~Tc@P(g.dɱgMėה@G=&Y",D&gH91"J,*]Xȼ26Dn "XaT5ĬV&čN=@%zMvr5c1D-s)e(v7Y"ŅVC6! FݲױWձ1>Uu7xUhxpqrƄa[ִ<r>-^IiPWp&ζ~xCu+Z?zPt.k=F`b!V RF,խB5 L2W'>4i~]qhOݿ;_~kҕ@z1ǟh/4Ss쇉F5avqUM8C2Ua:(>I9 { 9;2ZJ.㞟L, &S2g{8p_RJ@*GaȜDfʍKqI<ã2p8-s[F>ajp|x ʻ 05i~w3Ewn{Ṟg0Ng}+up4iwMep[Ւ0+[uKg)mᨳOԃ*>=ndR,۪`xUzW M7:RV4}2 6x3}(~hOu構 'YʢFj.k[2b1LI4G,UɤGtRKCi:{œrOΏIIDF?w?|:~>~Gh\KUU"??O h~Zik5M-iZ|yӮ+vbnL',wÐh9J\{:Ut&88]<~js▟e?ENOZ7ܗz|۰y Ec?i!mMu7bucv`=㹠8Wzvd`%jLY!Lq:O0 WUTf([\\(Vl%x-G@JbTZ:a6VVjr~'Oh+~yLj(( ]q'tv3ɜ#dh"80JZЅtZFmMN)\)!VDZ)ds.tZTmXM529U*la-4mmVmRnӋW$]r>5yA;vC,hR+K Aa3YQK 45eXIZ I0MM$I4$#\2eېY8sR>D\K-v +Vjj%N*]5<䥍dR"=7)t\Gp e'XCR,#ibȄ 9 ٚ,jA5G,E"h|ryv~̽Ɲ˭5?6EE-C3^3qSòPy* *MXF R`uDԙS(֯1m1j.ke2 r3!h5jn$I 0hf툖j2Sh5DhAQOɐTlpUσ$9"i@bKlvWwW^]w~'9x$R{!\`%$v鍼8 K/޿bJx&DhmacD"ZOfX; sD'ߎЈ T}LJn]?tۧ%{/(Lduͮ麺wݝfNJ&Cw$Ahuשk592?\YZкjWw9tfMJ]E2uGnW-w^[r5?՟^5so4:_ӱ7LH{;x0fYOn6!nM~pUNBs;ϣEMځءw6xgZ{gewygXR0 L:Jbu(p|*Qɏ$/+ t0pU $ߒCR \qѮ;JRpW/@t.]-K(⊕yv`+tR>冑 sNO׋pZ6}؜;<ǰ(C$eTUoo1oM7ͫ-` AH9# 'r9?Q8Q+/GK|n?_ak`spW WSk\p464PPtF*ȋaʹP.J$ɌլW8'wԍAoeON~;jdo}GZSyztaX\]:7F02$\nZѥ̙U||n JL(>B*ƫZ”%q^EKUV.ԛ1M]Cbxd eD- 0}?Zf]:F.uVVndZq .6zw;pl#I(RKpGx4˦yڨC5\|[4, gH匋>XHѼIbs颣+@B/;d$ۑ$QL$.$UU,>Oig;Kskdͥg ;@ߪCշEк,5!B2+$j?T`=K Њ0-``=\%*5;WzNK2C-WNp~$+MN@}+<ՓW=N]7Iw$#'ɇmHU\a)G3lgem6h $(gBC45͇H6FN6gȔFv$ B1/#W6 S.A:{ ~Cv I- 3O5G(gD*E (Z4G FcoL"{m_$>) Ċvs˳mףKge ZJ&@.r'4A2Db021D%RT\0/r׿Mx^YHwD@4:&ɘfi%1p"=< {"a_U>amp )!6bab*B\RhEB(Sb" Y?~GnRWSAV3ń'JF!-ޤD"B(D#\-\!: >nRSU{N*f.zw?Q`9J9gIga95s^M*&ZcN|ȸy0S0H6`fZ~`JH%zd9KÜg.2;y@Bj5X4 u0b3{*+b7S1XUvޜY<լ /'^UX̖3g7UTv8 nEq `Ci$"Gb|H7MÐajfYa0,iX[|8 =ٻMWMQ Z=j=ɦQ*YؼI3> $ 6)Άeٜys3 *C7qCfVR]9,| h8%xtUU~L]5UboP~ 7^mï|GLNj]||k}1M$05 lG؂GCCs0^=4UмYW f\q)u㺛Ł" G Gy6݅Dw b~ww[up"d5F2E^idX;#y4hJ4yIa@gu/ml%/Nϟhpu@q !P`^Ǩ}Q@z`{+YtDE̜O$LoG[o jg'PK+}􃋹. 
Rxv̡XS%%bձfs=LVIa;Z,_& W1tjR 6uR)mKX&p)äBB2ҴbiFnComh?'jO c|[HU 㷎k̮16@y7m?}- {rZp]-ZF@-k$ic8V#yCLPdzӫvPH;g=0(|hau,sLV,q](9[cR;ĬW逰A2rVk/Q1[,A0VF%穮{9r|>U|^pmґ YrKU(.;R>i6,rs݇"r֗{~p=~4e&HծO7g6h|PHI*krP+pí]Ϡ٦r?\C Nz_?N{ӛ;CZi:3ZkʍjC\22Q?yDD֌< 'uiHT*W/ᄶ~AX= d15Jk-?_uy)T^J4"pqnv )&8vx^hvk#Z6SWWQ9lHD+*h0'Ltȩ@*ةM`B<UX-ziR}D%b=6M hBZ":vuFNKWr{7Ӄ0]6C z .wp..޵E+}y4ص1!EI5]Wpi'ZV&r%i1:z&@bRIvhȡuyzuɚNgmּիYt-shWھUyWJZ懔P9yX%k:/4ߍŨW}X~ݴm|nM~j(7rڭQn!I{0 :$v4%>)(ݱnn|2~qAIu*;]3Πs ˕ :,j"C2Uv. ٣d,d/TvNI' G" a+IeYY A4tTٻV {4u{ɤ#;nή-ǘ;CYf0!XhGo6m"L3p50$5Iy5QQ7_> !rroDu|P;#gA Bi4ڝ!Ľ3>(T}> 3=CÞmJ8Ak"6ѱH,0;,p(\DNJnB}TBDE1I٫N%&x#32xMHR]#cgGdΰ3 eX(XS,͙&l9I- Г'͋_6M?M_9bdErsM dd *Ry*q @b\2P|J;Elhd I3 j-$&%yI4R:"cmb jw:QxH #wJl7T9PHD=;4NB4ׄKAq!,iMŠAC3Т !BƂZ:Hc:YR{>$`ܛg]Ǧ;FDuD#"$DkM%G #,0;Ҡ*"HOV)"jn8@zHL8cu4(@&LR#1sr_Lj9U:rilqɆHPXw#.6pM VTs&$nFQN+bԈMF)GG\. v!fx0 _U c3E?r<ΪOwT.*BNVyz[i}.5[ZlKS{bы=~P 6igqAq$R;-24` %O?{ʍd/;3mQ|]bbfreɑl },jY(Y5dWOUO1̈́- q %/R\F4)<3`(UN+,/ -6-І)B 5\`=ͯ[e+)>O&,/RHZ9wLAs !S̢J&N9N IyPlObg빱hէ;qsh xT;)%r8PH5wEXfJ¥, N8.tkxa=+6-~/JAPSF&"`|!IJC4^GbяYS*pV1[b+o5kpPKt"GElL㶠v })*2jZfxJ;?ei#cBYj>4< c,&RQ9ymS:QpTUb2FCe6 Fo$bR=Wh m*q]-" /@2-) Ӷr[Gwd(SS-fR.f<"AS1+VN=ʩiRvڡ t2+ޞMɂ TDC'$t&/l`eC?vuP+!.nJT@ʊ*cp7 @ֿګRN3GG{fyS~c ׎ ZkIJVpFl\,R(^P^x8OC@:5=򙂦X|Z8f" %QVǫT,SF [kԺ9v0DYOז5y(rU$}$\S/G^~qɅGrӻ@]=&]Ьٻei=Yl7ΚrF)ZI#D1 r8Օ8{Q-EgQ--D%?кmZW[~֘\i2W6dŠK6~d BޟsfR.M)]f'Ӓo~oK궪m5Q抶aЇOkWx/~ZM 1 &hy8)+:}ñv8?,Sxl`d$0.WmB7($# 2*TA҇*&h`PL(IaMcG9K6'~{HfģA eY3SDJU5ʞ>B9zn,4 &I@e$qRG'qT:Pl8OBW+-T{Gdw6$L:+S|>Y1/`٢s|7Z*<+SO <8U@WHS)0c݁Mu`S2lkmx߹䆓p'?^5K-Dy1{kpDEMr-f@?(8j$َ =nDfk4)kLHkmj80(3Y`je6S\Ζ'z2ޔN _1׏m~Q6Ey|làl{_WX."cA(*%B0{%QޣgĆ V&YX,˧F08%q`.A$^1B"8I(Z*FU }\pXo;-ICx^~wSDr En}oi҆ZFW`z " u -pIT_0Xt\{ȕ] xrs:jz:,3#H|21$At hw(6xY"'uY/_, R'b G-FI*5 Ķqm45γ{eоvot6Iyp0̮{nw8yƝNRj+pA\3ĥ:-τDPmse6(>-ƚns=60f{<Ksj9ߪFf)Ѯpi֑ch:ʩ߳[zPV*ÄMַ0&mhvWXv.5ߵ5^)N)=pl݇-Ó#Qӛx[*Tc߰0S/<&j a7"Vʛ{f}9Lp=5n}ix! WhY8Dg&lMyDO.rTth?o< ܰٲS6B̓(ݗf=iSJE/SE+d@,VA8 ăT2!AhwJd%{x(.W+P>|MSTQj뼠! JdH([ F8Ec3zE^@.*[[L6v zzxڀ'ƅ'oNBnx(eǤ \䣂yԶ~۟b+j 4G0Xѳ9֫RW}e7>>%-<*(KˑG ^ X><ྶjrX{[4~WBqm$ aRFncStFXp\ ou(!-+bp`8J5ɝ ". .̃\y5߻`Zseh +]W)AgJKdKs8߉>!e]DYS] =G| s#@2Τd9 cDV6Ƣvw(DqŜ&3M WƁ>PBZA=.#df6F Gۋ i凷lvHR驉B)˾'y.4JQCj8(|j6V;Bd(}bB&,+ņtr:Q[i! B.RH"< ɣ1,a<)6KTsچPgkƓj>+v5{TEǜf*TEi; :13I:QW`T#Z:rQAPe4jL3O..8JF-zӱc$[tmW\cᄶVnu Jƙ*?or${ZY|p$. !ּK\cI]pwհ&ȹK-.ԶP> e"E`Р%ϕs|⠝ExTFD,[zdS8&g)!;_;^X`}^(-9 {1%Nܟ E>JN2Te!U9ߢTA@Pɛ;[CS@^by|H\kJ}. 1PKwQÿ3&HGUtgqY%j+X)vjbo8{@} GBϾzE ~aԣϞOQa<7$%'O,bY Z\͞ki]:{vH념wPaa,|z`TiaA^("hai}S9L`s*vEcz,XpkZ6$ PRj"WzL4b "tJT@A!T nV<kZ% {旝#קL2|Z~*YehAzZK^_fD{⽿G? 3ҐpV~6;+?ˤo.Ex5|kdvï~v~DhzQL#<]h0k=,x ٝFA qrdiht mxoE^,˟ o,mhZ͵7ݜ06jof;7tf| z|]ov;yFfm, =d1׀;}GωK_K\憹> y鵚/Qz_YCrK]R lࡡu9^8;%O輗,tk,+@EB \rD$%c@¢XP&r#ՊSEDF)AbI"{AGc3q= mAnV|V,_Y|R^\XmHK;vWu|$8,;&"~]!)SHT"1$ 5=UO?US]IyB,A,!,MD(j:g4W`T 6xv˜%yzWBPlD[cUda*Q#[sɳ;M-7Xжոۧb;!40 N<{(f}u1_>M{fH"cC4uIuZ3 =P H.=q %CP2 480:*AeHIS$[|g|тJ4{KeƢSSGh8`vX,);k% <)VX)/'-KcL=-q(=r..pzn;$1_}"*!nuյ{v[ d&ݻ>NYmP*QCf"}]PdTb(vHDF{jLvW{snEcGFk2ⳬv}3]_蟥lVBwge, DW Z.}o3oQcS>ڃ;DJV<]W }.9UZ[zsv9#i l Z \Ui:\W \i Fq>pUu.pU'WUʍkoox7vYrׇ_&^~p~{wy.o:ꥒ/EѯlXx6ϫϨKRKZM|=fq1/T>I _) H@?#7h:-+a?va}YVse G[j 7|_GWWص+Jͧe׿^c؆?\oqzy9{`V/y~~to^OAHvg~\&'2~Atn *o ~߻WCk~ۇۛQb#~,OdWy:6L[-O{9?r\>g/g+#6yC. 
6kyBTnrN=Hk[=Rzmi*ўg*jˈ=x2}㏫KS(t2"0ͬE.8:G~&go<;\9Q*=lfq-4*BT)i(_Mb3SUϤLJItz5|rwglk/ywmG~ G$H,@#%Y)P(JjAhVK:U cņwRxL$-Odն@ZX+Hzx[]5׀=^{WLح_zΰ.bwvubsrKtnxn5B=oe~XFx|_7pى^t2fՑY]'84$%ppٿMJ %bTP{#O X9PKt{@kًeooW{q_1|bうPQ\\< & de9e1ǜBv&G5ASC$C4C[@q+*ebFbP*i-*c 8j~5yv;Zw~GѓM3]-3e%ȹbnicyava/@UժGiD ;y/!KT0@Y18LmAԔ]N`}꠭))Dx-3* Гcl[%&-I s?2*Ͱf1  o[Dd:ߺ]3qީ_Cx*L?ED퀈"$FTyT*+&+*30'H)uֱ .̞ض!!"ɂi2Rq&-e5mI# PuFZTj%̷:0.יKl%"oh|O4H ՆȐSAy dJ* X `)btJbSfq(Bc< azY<w% ُOhP+.Syy{^޷uz2}7B+=ঞ}Y.yj}s9w]+'[ި-et7xtTODv؞Re!aWz;ivUs`B4O)InI¾T';&ex"юLS[A٪h<$JB@&H%Ga͎1Ũ𒩒F4ZTqP`AIz!ev!p39H /7gqГ|p/%ߒV)my<5=%zuc}^=P]'ޯbq&FUR&(e2UW!TlUef.Y`g/ ( XJ6)j"p`ͼ8ZMZ(Ⱦm̋A `E`'4 CqB(XC;Κv+0u%ʇaK$"& A%;懢KD ³A thL Z{6U jL (Y5;EY2>yY0h2`طLvPwj5Zq1>%wc BY0«VO.޻ 4Fd*Da}mhxsu2;O[AX7#ĺA~2mMyst9ȋdv:# ՓB*H ]De{3my2۾ȩ2 k PV !1^L4ȳ, Do`㚌VVrua y]XooS'{tkbJO$j&:!aH6il{&s)/wx:{qN@bԫE]iDsڎHWMʌC%n$AjS/rI45/hj CSӃo=jԣ9s7֐w۽}34rQTS;+4r.H_e=ėcrtl_Eٔ Vȑ|(Jma .RDKTRt6V:gr'cJdr  Ȣ )ZyE~'%_/L}9Y-5p[{Mvv{:Gn|=|yll?)k}|+j]_ dKBGA| @8]2&eȐJͯ$>Ʊ%+j{< `px {#(s1ONM_7>$u8xA P̝;< H ^̷Wg\ \.p}}dLҡEc-Ier؆},S4(25Fv)CGR(/-Jݣ]h"YNଵ`ѰܲĹ#2Bw7آ>-֧q;ͻR<:c%YTs1P'YSLv6z_;c=ҵ(ȣPdDd#A N΀DB#MsdӜ2jk'Φq=n#ܓGhZ-LD"ThE݂AFI*QkJ"LZА*)VIB]FOlb}1n&ΞMgj6}_N_owm㋰;9g}Z۽ f[ŃƆ e:mZG$ *XTG 8>g_3OBzjUOy^Vxb` O&c0ܙ1R/Zӈp~˵VADTВYs?0JRa|,yR' $ˍ9pB8KhX1hUJ}cRXoFXWE? =Fԓ@QӖ]=N D6PYg/mWuv5.aM:[aVccKIMz a}F]7pB;3;F*x`R\L{(𝰛ZC9e]VMdhmhuX5diCD4n:ݗjLsito< 7ȇUw(ZD(l^Y%#U_b Y0&F "g͜ l(⡁7xN@ J57JOSG󂆄R/(!i"o)ՎV52tYTxcZ?_g qSNm!X3rI$GJr&$7}|8o 6 yD BcTDL0 wZeSl\&'Gs#X96Rg~Y8y/I_LӂYeOAR~77&Z-x.W`s*E `<)&ep[͚9 N0S0 zmM#0"[M2떅qj JXFLk^%pֱ$pv`Z39nKQ/]FXx2 .y3l /Wt!fZ\HցN>*ITt&B@ CON1"݅+g]2;ZiM8rL"9_E,dlHE>ܳ)&E@A@Pڽv+>oܸyOڻzM' v;%dq+Jǻ:߮36 wNg8ɕ-tm= jN3k{ /t>]G FfzٶHXHKBϨ{&X)5\ʿ#˿g*h˿3 qF!2b:K y 򲉢6)BM3Eb9̩y]FC:{!ih% PRj" ƜBDN HqR OȄ'aB\]I>8/4q<&]rME뤷Q:T%ydI6[$LZ{-QDw2"Cc1 ǸK`eNa9Y+(8́xV-?n8\fzQnKO0MXƮ12i4WR i(F}fǠ +'B(dE$ýp¨Ƚ₴m^>IYO9ͱ^ZG hGg 'V< (:d$њPy۞N? ]t4Im69ۯxN9 HAuﭧ<к *jc(@;"C[*Sտ-{7*oDezv=j_X+(8Wݚϋ%Z1'k-@霗 gJKdz8N:O/p@q5iej$ZR\FH23)9/K$%B16=DxU`flm:e%)Ĉ RѦv"q:G:[+">#o )I= vc]T4qyXM:1/],4$1ܜ$Fzj󁐤tJf܈t (6pPja=8v<4 . ZhXV#gGCJH D4VZBp$Bu! ɣ2,a<&)6%~B9V "}$V(`$$G\2Q JQLVZ&m@ ROMJ`Y# {$I1ev8믳5yY8wgg 7N.N+.Uvk] (¼w9m7w??u J^̏,;A iuC;S'3Ma4>2'߅ -C0v1"JrNeo\}S̞PF UG'$:N(ODT/`ɧT~^?6б8C*T:uFR"ղeY{zr죂Ӛ;LJ_.? 
-['p/0*YZV._O~\> 6s7{o˫rs?KΑ`8vRe췘~뉩ڞ@l馮V n map,]|8'wӛl2 zxes dc-cg:Ymt Vjϝ_n)͇('j|K;cv W:8G4wYUyH6T{PHJc㷟 eˇ~e}N6[Gg A] T߼kaEM.&\OJ٢_*|Q.H=>^P_c>ɂ}>ZEOa8B8khƨt!.NJdOӌm&oH/l+pxp4&_]e* +LϨd8 磮2p%3 ZuՕ9hKg"\~KI l>ڑ%${fVKn?dKrjڃnvG,ug\_wU癇wjn.0V01 r( ~ݛlX& PcYPZP> fHŐSP*s3m42&jry=x5e\NF/GZ+0r&bgazXJ%‹b1ݽ|)u3tB1c-@7%|.F=,\䯴Wm(-/" I(,4Ü 'Vދ14uWgJKT+٫st Z+$1jH!J`Vn!w3s)>ME$!EDIUo+iL~ʖ,{NѬ^րUWNJhH9/ * ] 24&p!qj5P@(>GNkVٮyo3X02_WetIso l>#:^zn ֟Q58PW047e`Z3^FNN|nx ʒDku( KԣJ[ZRJ6Vp:y2EAv Tzq#hU*;7 Sb# 59B_A{:nRvvS\rkj4mNSk qz̶3z>-"e65*Z*DTGadDWdpȅ]+Dc++9YWH ]!\s+Dk;(-+eճS" nx[W0tJձ%]z*5Q9dCW\ juBVtutDdDWXl l 2λNWR\ |AKd.thyAD)LOWGHWBON 4*Bl=]]Ie)e6*q ]!ڗx>]!ȞF[ PKFD 9e1 +/o-{,P8b$FB$/->m{k"Yez[l) #-"آ]:[i4l ˲G/e D95o}Rl  ]ZEL Q2ҕJ kϐ3g vBQ ҕf ZCǺ]!Zy PgOtU߲)1nšOV(h7c߁xOWV=5[ <{7 !+DHOWGHW02+=Yz;Dtut-dDWX0 ]!\Ir+DkH PJz:B]!`c+k̅4]+DiXOWGHWZElFt3+I6th9:]!Ja{:BR\QFTp> LEƵ(dHvprE6gV3Cx reP8_vNA(}آj~ _'znߵh´)-fd3Oh-]w%7kv&KRGmo@MHm28Jo,WQz*ɍ}`>|Ok_;P!J1-Rfhy#ՏRľ~Jۢ+h.th-:]JhOW_ ]-^*?8N998np>,]V謗PꎍJtoS ΠȈdU$Bwe@{zbJ]!`++f]+Dٵzz Xl  ]ZFh Q޺:FLRf3+2B,g uB$3 3E+{蝬vBˉ:]!J{:BRJa2+h>cW\J [W3IW./^F 9hW/oNpO[FW~^F'm r!\τ>¥4+2u+Q [Gheef<2+GGRc8HWVAr!3pE6CΈVu޺BtЕܲ)S>hGSᒻw7cGr=][ M5j+)O_&72~{2e UzR4r_[SgqD[AUt1V}r3XZs˒x2b`yFw1RPjp0&xd;.j.׻r,'?s'ƌϦquhq>x7ss?;p>ӔR,|yu齽/5MZ  ( \6߾Gn{x3TK\jsh J 8LشO’Pݸ2x|?ӂK+O~k~VWFeyG ,u0:ڂd)H0oR.q>0ٚ^Ž`b˟v|@7æ$M7oh2?j\*f'O7[{WT -Y*\~-ct|WW[V!խa2W8j6]ǿ6o/$]!O\ReN)S`u_Lgj# w{6T[JU=1ْQkGCE#ԅ2ZVăv)A[mQm Z@'᳤)D<sFGXPUJ51jClzmQ&n/lP]}Jzz?@珓C*YeNlI6lٗh ީ@6wEPSE^tR퇅=pY&SV~3W-TΑ))yX K69tLhcF*3*S,  ) 'zZ% .p:EwbK@Rm3ckpflָ5ؗ u\{.|Q.\#¼-2>{&_"fx 7O?GW؎D+\I6X$:2+Qs:R pR[elnxP /K&W%f6161a*צen ӌ]N'bv[ӎ}Y[ɇ\YrK fI؄[o1feR foai硅Q2XTy5!4Fp 2Nuε̇y+~zgQH]ac_F-3g4^B-$&L( Q*b'G 6A^k,pSi"k(-7Rp4A gPz#jI3Tx\2#iFtKyQ7.u%{"#b-Ş7h|3BK48A -dQRQ=\RSJ-arBY&B =//v[ӎ}̇t?>| [GR>,keUh`5/c`nw9*ѮSV<$k' :2~- .clSE5a[t11~tqPtq-q@e:194])thg ?0-%sϛM/ Pˍ!YK\ x9r:Y B|7S03Cя22Z9,p@jP~Q.pyz[8ٱۜ*n?u6[#tcʮ-f\M&Zp.IF n6WVv}پ* n%qc(DXb#&^Fph/ >.DJSnt79[ !_p~6><]ehxғҡ 닿Is} Pbpe 4:i )?,ژ(D*3ȢR55Ϛh{/h?kbSOQSl DMI%3B6TϪ֠Ղ]*| KM?]B꒓*tR0*mekrn}Le1n0|b6^C`tZX]sbc[GUʾutRZ\~Wقq0Dq/J\(a>E+Co=8e6īG Ic.)FJs x#JI48í5G`x%|BD X+ 9ZHΔt%Fr"Yr:Z6Zq8_Q404.Vk6V[Ȯ{/^ۦ[STPh4(0઄,PU) 5!􍷕[[o_CSr6lL@tOܲboy9طs +HMϪ vт5ZX(B]io#+|J15&EI/"i;XdϵOuleYGbWUŧu& MTx+fz\bI04LkA*EagFH!$V#QH/xEy[8o&%w"{6~EsFǩ||Erџo&i Z_' nLSofq3_/<|/KiN>qs 0t|g=C {F_;cx3{ 4D}AFZ _-/w 侯?hh#ٿT^FxûVnk\m5wcYv{!!k~[Nm0|#FGPxLrO*_?=$|sWD/ ^eg|/k2({٫;WsQ8.U%86חaԧĻ82 Ks3<§w\obRl7e%U܎ ګ^׃=q?]X6MUY=_ɼSŲa"jx5͡^",N^p2KBpx#`mH Etȳ!J1cQ^lj>W`o/ZґYc@?7fSk1'aH&aT!O8yҒS Ά A{nr NhMA(dI+0>H186䕳s9= nt &iLɌFnEY+cFZnbh "x;Hnn倉>ۻGGUruLSfA~d#D:* Nň󥣒LsBX3Q{4IB10"6O?~9㪢C{sxHj;2ŅK&q#o|aW 1'[WIi!IL(J)mA RrMr-6@jP o}b;IoRsx )iwJ{MQ)ڃG!2⨢8zX'N~~tzgA>۴59 &t0)&$2L;\(hՃTR@ 9zzvO =Uc  "^˜u1K2cNV A`^)4ަA<58~tTCT#:AoecE?>[f?ni[։ƽ_v@`RMV/5M%~NjW yiD'jDȠ)K17% [mːNJ?8D#uHhq-R5|'r!u%F?ː  #lѩMlÓCeBylBءv;iT6ozE}e3L;BL-Jz?6Mr7_^_]O>_wB?v og ˷/g!Pm_ln@jF{'f; !-i!-dV(VoQrX2ry5Vy$22kP" `mWWz X' 2M b4쀘Hs ZI笑9Xzqofu7ޓ^`]ӳ,~_juY7w-mPDPG$j{ Lfg>_ͧfkx֢I#6 &D/dax)J{v`q3t2tL祍yQZK֒^j郊hthOZ%uѸ$M.uu:7Ï^Kg}Xr{slO>/2/ S < 2zt&"FFjhwN\5W9p' &[rH,;>__Q+q]G%|gy!h)Lem6Vڍ9kow&ۻ_-*e]"ou#]&[r.t?_t[V7,0Z6jS-j:VX-hή 0~?Z[FLjoDޏ`\7#9{?"n}֤Dzy6Z"}×^s-luGX _of_ڭ}3틫qD@ƺ3Qx#yl=^w/#Ki!0T2}>&\E:y˓L"X^v hHd9d̃p\AŒ<=x]:el/W#g ' ӕ>7.sgzq^:쥭+auV]81y}"H-yuz㽦tScE56?dAT&D@V%ITl'zds;Sكh9@ld^1@AZi0jUv%YGoVÏnWf66y-=rK3)F"8sխ8'l7:'θ0b9DHDʧ(xZpyl7t5#mPLxdD`"HF&vQF"8ZBl@fU]0#|d;<9d@:RL݉)pc'$u^ 2R85iatH'a#{(ˋ$AaHc,>.gfN^lfJ)[}[MDw1#J!Nq'S+財dh&sC9qG_hv}.~x&?sϰ.RY6ޛ_c@~K9k*֥k үs-qPB. 
%K%ܗ ؼO7Pdhk/ao_7n#Q0 H+l@kg$OMFOSGR@}c _ R w: "Cs)qz'M;TAdQPqz7?b1rSlOmkR?Co2g@ OJUz>7;_G àj:a;\f޶9omĮx3o'n<ZoosAoJJjV X;\%.W߷ꛁ+Ae}wP.JMעמг|E,5ɥCZ0`gfOoElZ䉔i9y|nqu Z{VKkA76TJ.翾NBkV #W.K7Q p k^?BG1EFUy 5+GVG7U+nJ̞k?~O &uD9hkc^4;˜>b*(=܎mq &ݯG6,R A"r&D,s#.~f}r1|6{)tBh۲-W{ɱ6$Qs-9˝R 3| S=d `Cؐ6d `Cؐ6D `C Cؐ6d `Cؐ6d `Cؐ6d `Cؐ6d `Cؐ6 `Cؐ6d} 0!8 T7R䭤T$nѧT$.ՐRMTHʄ9٧m x+Q/۵FvRnZβ}خ6]{=K RVB32`k U cJ#68tkJ'g)0fsQ 8cm5#_zf`*ѠW󷫰&醴c_6d:$[@PG'Ym໏dz7iϹ<8l'-nޙb16sioqoxْKZRg-ͽNG`ŨwΫ?RGJ" fv(KAaЛbP Q'ؒQv%KQ9lHD+*h0'Lt~vFT ;u@`B<UX-ziRdpD%b=6M xBZ"z%荝_6S]>&}!NF=^oLf՞yK/bnkfR%cv Nz:[4aZ3Y+:$Eh3DZ'ŤWׄ9.ǟZUa:'6[zFvle-ZZ4O(?4Nvr<̧ O7ovED3"DI`J pGX. .` wLi3TDО WD #$$8pF1!1pz ",iP΁%MFbَwW.M1:{q~q7​-oӍs&$nFQN+bԈMF)G.{]x7<<&0MApC+E?!=qi| \nt%8AKdsSB\n96CxGyy_D>k_ ?AHRx =B1A+`䬏k?}dj"W R/1"!R&!iʈhA #( G!eLDg7v:yέql>|_JǔWm1lѴۜ4JM֤ԴRg?mgA 2F6p=NYJ! 쌂A$˵0+J bF)n$|.2lT_ȧGh%T8 ySmx }g%0lzcg;0L<D洤 U@yj%v@ s6 jezy!H +#7XH .qhEj$rn6cX^Ykdîma#f]W-ZlW!)CۘzRԷe7l8dOR 6p) ~ukqA ejuJ7F4'#9O^wn 0r%4uZ,{hj.N*]F}UWi`"N$Xt:8;e=p?f٬H\'lƳ+O~T0M%x,Χ7Y1,NCMѬl6ԍSYӪ 00e$e$HR-x6&e! `dӪ4P>cu'3nGn4%wCn}bEIL^v!Oi'˷N6ZGKG/;ZC+55V{ ɞ 4UhsݍvTѭ!L]Yt6JR>%Se[-?yYG5:OKik4͹SǧQm]k|W?OVd[ӋƏ[A* o da^01xWf6;Wshz_尃.'!$#2 cas fVTLwkI@Ke^X2{dzc1Kҏ 6ԟ4dIR4MaiӬM T9G4!H$P΄9#嚲SHÐ'}t~8NB`Ē} jF}SA(Ӥh_BNt))u1g瞏_b|O х9 c ӹ AAjH,mJEn4Ib RcUt~~g<^yd|ԻG?uw²ٯncj:z&I2 `T&2b4Ax?Qs{ZEs -!Ƃ-مL BI@oQK٢sho^t8ErR/=a~>ʏj7U_~-_OmmU_T'mR-'MI0(AB1:| ֆ*ۙ.]Pg<[J] I;/ VJe# P%&k3/רNHY*(TeD'QP0.$kC;ݱugK;k 0e܃-mcIK%a`" PLdP$Ȯ6R8)&V&Х RUF: <-;EY><-Xj0I%1;&nVgR+^i'V;JQtΩң1 &]xvF){%eh Z<G8z٘):JӣW$ f& QF~9:˚4 'lvFӶ\=XYd XEVdGknE]MsSṣ{S!7FOx^˸䧡Dʗl-2+4#ksE[;kgpVG_qtPU_23^Ⱦe=>w..P}We8[ ɔ nd̿x-߸g'˫lN|<쿚ڢIaڛ𵷧Uvo&<'_xf]Aɼ<,Tc')uq9pKϧooh XƘw:d8OӰxM`Lf>ISܲKYK.'D (*6)d߂]Z>>?] 1 춾M{ML!'\ˢC_18S3AbjSl=l̿$>%~.<ϋLܜn&8{@e<2M>v-l M'љ;3H>:)o=9G@b07$,uB!I8z8}#ط8" |J$-jdR􅵀$gL6c Is4f$ s&rz dUˬ$+rNӷ9UifW\|%y T6O,>uC9d6p$l*"TNx;1ޅz{!͢5Ӱ!*je{dsCrѷsJwߤ۶C+ZƳ;bscP D~ 4>KZ) dwhmvێv?cbN"sJ6C]'> `*CYv@6>jŘ{Թ(D)tD BƭK38kDfCI-b]~`{{W^~<8}$HԼ+>Fy h\b]dv92W7ӏ_?tI^Ȫ!b}C.0)/!Qg>ΤLΤL4V:6 ML!AuHB*>6#Il$Ϊԩt+$K62dEp\Hz&ұ Qs^qNj[kϭvk~O22 5VR K՛Z.ĘWWS:׫*\ |׳(IYȚ,ꂩoquDmĘ{!d9NXA 0P9x7NIPbK"KHעw g&`8]?bҋ'oy~~ [սJ{)/2FK-R* #d.FN$e`+Ti:'5FtR]W3VBսc~Ef;5W*ڹ.~w59۹״v!ZЅ Wws"mAVQ^CC0 =xq(%˞T)Y)Q\ɔC=Ȳ{Yk'J"Jns$EQ3{E-Alt>Y>f%̾g֨Dd*38ԥ:8?=_O? N.^ (x̲bE:(bFrI<ѥ87QClJ(%mҀkZ!۔qr1$t}:|b]rY-?xjJHXPеzaTD&S E9J D䛯P>r7)F%#l%T u1I"8[y3PPlWhf_bl _luIs%E4hTDlEF*Rg$C!f2o'm66ͱ.Ul[{ 'ܤAr- ϟⲮ_N6H7Eh .WDp1.KUVD!8ELaXzi%Zwɖ,ԧ5&=f#ep q3~1_K8P'QJ0p3`'a0Y+\C]j,cWg҉w3IL;?4 ]l>'r;hց6vݜO!}$ĀMoZݛ[ϵwhꬵp8Ͻi6ײY~r[{J=hsDrrݺziQK>G<µFʻ1tRjlJ vv~*v??hC&5d%%/,Ԥx &(B0Ө^;/i%G9j7h- ˺ cY eRS# Q4jnd-=l++g|o|s|_ⱭK-fQF1gj~QR;( nO>dDJhsҌ=[UA%tY ʒP]c)ikɟ w6z_=bj5[e~ 3Fs$JxEGbJ)%2N^5Aڜh9+D(nN(xg:xuy>}VﶰJ;X&m)vR|G䏫 m8y6P RJ5F%-Ry9Y9~ϑ5UYpQWmNW%{ ]#^%tu'_Su ܨ~Ug8O`G ]}b ՀkRjuj QҕZ]?!LtƟ<] AҕaE$uWpEW.R h @iD]=Gl <8CWb@kN@Hҕc|\]0?ະhG ]=C%w&~q`Qď4Nh\smψC؅IWѝ{#2\c"2ZN]d Dd(oۿ1e{ !vvY?;H(t3 2M}ߌMʗǃ(o>@Jmoo_nvs}`9luw qT.6&]?hy1q|&擔>szOq'a}Ԯ^#SnĽ*G(~w|0>SU-|\X*D# e"O3p@_5片Y)̏@`^!.@rKƕ#Am޿.oZxn_'y-i˝K2F;<UhQQ6ws"_(ۋoCxćGvi3~Xn x}pm'*JEF۸636f3ZYn^ߙ/~9DSͨs !&-Ue]δ )K5lXOKX15A:R/ _juU*buXC@.UV 9Y Dk#lhCk5Dfř)V1K&ׂɈdm#è1t2R9E>d'bhh챁< j*zwԬ)"l5@&g2kFȦT92T#b!;$3k ]ccrѽ Eg[TKNIP}"6Z}%:qTĬ\)=h0RBjM":Nr*0B? 
ؐ9 FV nøxD )'pp_QG;MunE"NԌMܢ+re_'|0!46t:D7'XsQkU З|m@ jTNE>'3k],VՐ jՕo* {꩛Pc!Mɀ1s#Out:kZjCu:v4KZz.M(VLeg}`9ڐQ[rUCEEG |WZS0mMV촍ʄYÀDmUXu>:_ gC[ǚȭL%OE"n%l0"bk.lo1D +aXv< V4ozTZ{j,FUh] P;wMBAZQ6K#9vx XٕQ!\5",+5GVdVG!}AP$xR, |LlR%&CijV2TW\M M'Idͼ޴X q܁EmTzCŭ,ȸAMACX'cP CР,Lh4vKuEButѷt,r%AuPiP>o9@qV26H| UF2bx蘊=GM`]@eg:#.hZ R|" LԴjQyuA}rֶ5̨tq yy`ih^8 rH×udeJ0ԭ)cQx $nGe6 *YȩՏUC^XXE9լmC7YKZةi:B?$N%[>;F,M%8H3obM=QGA]RH9c  /fs7DRD +`X߳鮏G0)ڽ,CkJ q[bH Φ$v F zbB qq% ;ͪ]Bp[#h)lzh2Б5{Mj:LMTQDi%&HtyP*Z3zw7NWpj,:ͳ0L]%1#,[Jm;Qb58{-4K #lC$5Z Ԁά!;6R[ƥx/GtIѢ6HªL$` f}Ao3lhT [L CFih|԰9Gy>/^`t^j3In<0 &yhLi1 l\&PJv(ukT֚8%9OSFhׄ b P/F~3=.iV{ұs8ݐh!/Q[øb tyBܡf֤D+9)TP=`mSFCj1VQHOORC֡뛷f 6Ł*'݉n!o6J1N}rӐpO6r%֓ bZ0za)+ZLjiln֡H,pU/xp`\SMFe]2B9{ tmr 4=砸׮9q5 o:5^ b;ڡb*gMZ׌vlMEkV3^F\.O34%0hG4k1j4 B'a903i5VC p \t*N Yc6ՔWnK9H02\P Y59nR: 1ibl$TipM(h]]~Bz;D;[0BRTak,Uq-nn׆Aru>vu:]>wᄏym|6)PkrZ#f6^8V|b竝-[]HK+뿭vt8EmW͇`B\=vq`O7/_g ]Z~& ]㨷.ݺU~w Xd\`61.q2]q hO޸Fa,K?zQJG[ ]ѴZg (te_. _KWr pCK+ Ad'{WFėYR6@0kL}yk,%ٝ\˱e[RY*FK:,^K^:Eeg0UBog ::A\q_\ޛapϵ Gyzf^Hg yu;Cvu=@8_N\٤UH]ʏ(!\p C~3*S59>j6s6\v #?n0)'tg4{O[,f10bx3{Wƨ$H*`*!U<$ʦWuR#ܧ:woJ6A ꝫNBekͬ&/0qV},)YZ읺Zi6U&s*FVƆTTl =1C*k1 X,ńAe2ʵM;*<ꁜ*;:]m ˩:*]mQjK] ɂ>ZCWq3Zz( 8]/]CWEU d:I\0ѕțCW`TMJ+Di;]!JU^ƲOG FS_^$T'^kl`6؅EOT"T'r™*"0,EC)mF+i{یRA۞bo (T?CθJk#7E٢ ʖ||(wAG8Ü3tޕɎO5K 1U ]eGdKd$!])ҕDIt)'Z2JY ]!Z~ueiA]}?tEY ?:]mÏ tZq-yD6[jתBJa +x)pܽZeNW4H@GW'HWL}б} +Vo!\ ]eJ2J#;:AQR3'yStp(DZѶ4m t]]!`t1tpR %פ+!% X3vbADkh35t*FkպY+OJ4p R(Uq;ۘ)Qs>v Դ|,,Y1S{rIL(Ę @.h^biMVyW|3;6eD?(&N7S @W^ ]!\Ahh;]e :EJJC +CW./f.ևAe 2?E2+ +*틓t(f;]NWUΈcGӣoWyj;8mҴ,mAW]jA +,u9tpAWtEWv4mSW]V2`-+ BW-UF)DGW'HWق51[VCW.UF+tQΎBW` [(2rƮ2ZzgQC::Fj. X2\UF莮NKkm?N=sJ]0F%SEδu1(/yƄ&T)6.Fy~!/ ר@e!V@2NPe W= `X hW|W7+Uo^K_U9j/cU6! ?W]/_~ ߼~wtm X;Go6Rq}_<ѯuL$< uke.[ˆ b./.CXŀ8ݠ^%zC{?c_`^ hISjX*R`ժ%M.DD~]H '2jn A[+wS.T9 Qcݶ*v@BNX65Ugb70_[sQ_PE-4@ ( `&:>x/oDR%L@@{:dD9λ.胴y䤎GvØb p`#y ӷ<~a8->#i8ZK?! hWاT?y|P⛜>}qwNYgxwuWypFS\ex L~zB1cAsZOs>8QZɄөk۱w\Q!)OMbI %F+$;sh x=".e̒dbIC(r˦CϞ%`騔!\zϒ@(:!.tkx18lgu4PG3T}&*pDCxbb@OѠ>$IiF$@:~%Q^I&d^A#T58Ѩ%:E"6&[PL;Ri_kʨVcRm2Io/?oG]ad,P ?O;H dTD(wruvqLz'mlRUhgs6'46}oj<~.% 9ݴVSo0z#QѼT&Hϰۦ߫I(ɴ$Ȯ57"خ'TXӶ4Lm_L)?fevKl ME/+ohF:vͣcu4f!pP8z=s>&$"JI/tm:'evtƈZHxe@Bw8jRR<)Fx8! %ts,s*Z}cv`k]i>W4!z ViϮ dCz  xm#)ξwrܰd&41xd-rJß":x$7:ߞwٖz- }43y-xΓKCuz g /)5IL?S O޻hjDu'g ݨ-%LynӇy\r>ݮ\i<ۂ";ې|+Os SvdHEGKPI_W6B6)[(hq`|'66nGmWѭLW]z-k޳{H+I}dzÐ\ǚ;o@ޅbο4=e֘_?xIۍ?osyMSW:+2 oJIpr~Y)\ߖuj<{A<5 S";sz5jhf=ul6.7\m7t{# ׵{RsyjZmދa6[ĔVj|aE{[˒хUjYʮ<7۽l}t*Em8hbʹFN QpmGDDŽDz#z7Fn 1n u2 aJ IhzZ0 q>Hrf%< +Ƴ N^ށ!USg_ 1.ph)YN$"(a1Q&d#*1Q'>K*9]}X$aM$/^s2p1DdYO #M(ubr+=ADZMuJE^3"`S2Ymқڞ ۃz\l|^GW!<2 El.yMxp`*GT?n>u Phӑ^ݦibTIA!EιH"YoC]Q뜖؊DR"hVsDG\3)5P k=Pו4rzbi6h(lFst`hRXz@! B8 4ɤw3piPU"F.45AxgO[>ˎ~=,SD}FchqD–_KmXKm]3!-ì {CZȰdsooQQGFz!R\vQ70T@l᥷Eb W}YW7SԒ5 I! q@Bz"" ODH<a U[k4OZ`;3 bs}}倎aCwͲ-C-N=Գ 9Իp 9 Iq+<:R Z4T#<8!_|Bx1~kb`<= u@ 㝌!4hA;/$1jAI`QyN Piƿx9(~m96J}+B"]Κ*ۄvo|Ի1tZQ5ІGGc$4A%.(Jt8nT&o ,ZGp Xq6y>LGfL7T)uQWd..}C醛-:Eh+zͲK͒oY#B;*QtXΦջ ^v& p>B c,dVG׫{R!́GURT4sH$Up Q ,)11HDq]a@Gc}NH,1 rIhg @|H9gs`4Bˏ+1W2EO c0 USIALWU?J394`N34pxޜVϓAVNsigpp 2[,p^;dX&}=Ed4y-#0OgaQ+lǸǿŷVSEy=al%9ݼO37&aИs#BMЁbmD|0! l.FQnDŽQ$B%9:ՊsÑ+KrFhE'pW`=uU^@+k\a85ӓ`<çT}^&.B8\/1"U굽WIooXG"pNQتv9gjTJԌS¿8'70*ύ y2n=DkYe`v`l u8NYެqd0UEp-wwnI{h놵wSGifLa<`^|OƣB7}/uWF:d[msU-2;a#c4dW% &i1=I핼(!ZAaX,\}}?? 
:]8u޲YR; PR +\AhHTQuSUSYOq8 O($O>~᧏'NtB9O?sO^fr@a}~}#p~e%ko57ben|~mSn*V{+~EDa\ȑv|PyXo }}6ۗ_ՏVăt$߫-H!GsZƨ(%68-rf ibNZVu&oב0.E= 16OwVRKͥKѺ xD}2\̄["P戛DUħS۷5#C ;ʰꟾA~xv^FFI/ª2RZ IL&JD1^Ⱥ`4x_$j)` s,N!3/#-ͧD}Y6 ^wҒ/o,EiQCgT+Oau5?ׯNY⪈JBJ ' XClvQ=XST)4ER)1f*@UҀԆʥx"7xޙdSf7 L } 80KEjHD(O!Ҟn gKsr-_Bzlz]!xXxUe*ˤ>߂"% E/TP9f7nNIO.gM6϶ڡ9#.3sS0oH㵗n[uq96nլb[nݞCfMZ9T;ћ9o'㴝&Mt{l45֜Ot[dMIx:+ Mٺ!9Zo;>JRd=Q"pP1.z.Q%P9cN ) ڲCr@x'wIo 4$qɍTyĄlfT8VJ R޶;5NVQ㯹ٰ;w-mI P~|1vmSEV EFL8Hz:VVc[}ILeIsyvE(-xfZfLdSFzg9=Sԃ;X#h%.Ep:EEe 68w3c}Jm\Xd˅f.>(^͙2>4,N/(p8<<؎xr,T&ŕFf\N&(8hj >:{.2½g!mr Q"jfUR̄Mªbjf~<[.fSX68Mڝ|ȕUZ* j]t0HfDOu-U8ƬLjB !(v4 2@1yp+# T'>68waF9>4fZ]q,#ښt1⭙P & <2IBu 02 5\8Uɛ=PB')8S3>`DRIQ͓ Oxq\jR6)9u8!$Rha%%/6kċ8>| 14ےǧ~ioёRrBIk=GJ?`GJJMw )Ũޛt#W]!\BWV䴣+x-+,h ]\-U[ ZaNWQF;:Ek[DWضضt(%J2ubWBT-'M+D))ҕb(" ք`M+DuGWHW gªۋE? POzӣzQڵKj"&2ݽZm.哷 fh!(1{c28 *_$AxY184`9?REo~;BS`B\ fEi%{v9ݫ;]wާ;qRƪ9^l:a;{{%ͤR8Jb.Cn[9L;qk<_JRǍ.4VNd_>Wųb2-sC;w>}J{R)!ڼ}t5gF)`!m4D6~PR";}3amCƶ.%-thE#߸V4ӡ+ˍm'pik6-3M+[mNĞ]/zDPՓAjg0: z": mX8DGWv=j"S++D[ *tBwtutńl0m+DY Qn/;:Z*[DW\2Bdk *B6JHiIbWB*"Hk RҚ}%|l܊㎮@lS +&ZCWWƺB ܚtJqc%}xVYsnrH/DBC> /ne%~[gw˜V&*r)ޖIy̥0[kòTAM>Q= 03v+ mh • 9#ڇ6V4qVVmrgwWBqޟ%] ]Ԩi]`hk պt(mg]"]Y͕lӄ>ִ=!g+Y[ њ;Ґ8t%z#ZKVR?1]W?q PJvtulSC{⭡+S+@ˉl:]!J::Ab[DW*B^+DIeGW'HWim +aZCWWƺB4] ] +i]`CW?u"ahi:]!J::AV+FZDWXĮ.ouhjA(73գЕzko-`>lA&r%4Ucn ME8HLm9G U[/Ķ笖:Ya}\$$K)S]-c\*s B(ben7h  v~eWMb5·irF0QK7 ~o߂:sPՕ V5ڱ҂I5F5s?ß7% ŵTe2 -h>>[ݺn|eT_׋?Li}_Ʊ7ר9/Vd0]An8Wu#dG >WClAPAdRR`_WmX{ú0ٙ$K)T`fBCb$ȵDDCELD&W~_`4/X]_|]" on/ dKuߦ Xb1Q"9䠜pᩌI}HdEd&2*D jLyp1Gɔf.Dc PUPzDsYʢO\Y4XߎcޥK3bJZ-J$U`G$a&ïXʪ~ 4t>]NA(=> _)bAt4j;/Hbۗ E^-WiV|rjhVTA؛Y,BuĔLh_п# A#ÕirvϚq^qw5D,x9앷҅c9ḣD8pyF`d(c^Z<'eYKpH!FJ:{pܚMp8K;&grD7A砠Z(H2(+bԀ L)̠\eR0ntb|KB)_%<4qd%6s*1%Ah4Y* єjЈ@5F;TѿIFi1)E6nVֽ?8X@gJ$t_H!QmYve0H9s0 IĿ ~7--Vs'#TdkY=&р-JiC4āH;EW wu&D5`>/lVTZ1&\ < [a}/U;k.vUvWX}c56Ѹ'qW=9nkbIލ2r{FuW[`[7Ma>I+%ϏI2;b 3ڣrWtShešb9W<|LS* 0axzo4\:ò ߾EokImhqsP?I⇟^՛?=eOa,z zooZҺVUs#Zߧ^[gw*ٺp+ 1N@L<>&^Uf戴fG׭|$>ڸ;]#wN UX%F,g=XIHr3&:鱃 +r)+W ϱV)q+QA@Ѻ x2p!0{"bȁHEP4לws:4_6%6hZŹ}fv -Hzlb>8|Vˋ:NF+~nE3;n!t9nu;Һ-s<#͞K=C{6[ԲB[kzHk_Y9@J0Lv[~;~ṅ")G5R刻,_W<$dKI9f=9FSD4g, C,Y1jHoro,!V3- ^vvT\d&b[/ch WM1f@,Jյ3DzQ"'nkIZ[&T%ud6J$U΁vֽqYlw(*}ݕ쨣e&|0ǐ%~Ȳ6l9p*qY^vVM ܼ_8$nV)"( ^̥- p:Y^2jUZz%7 H!N3#V%XflF)̸6HQ8:UȡNB)*>d6hS8iHK'N3mUQe+WLL2HT`gQVoGVJMIlԙ;JTkwHn kW햱eR Ƽ֜f`NJh>Iy3[2̵֭4XЕCgy-8Ԥif{őZ"QP }3NViY4-phC#u HB;sc1 "  %[qT̓ǣ u)t:;.Ӛ>E}5m1;?o)4jtE -mtvu.=;zvdJk^PM#4Z)^JM#$4*4i 4W:3o~WRXBrɉ p 4/_ʗç|@>d@+) X\&E`9f+%*ioqFXkr'fBSfG`֢mZ((eF>b] <ʾ\/؇l)E1iXt&7˹{E;o?h|. >|Owc9u!S͇䣧D1U?BX~5H`*joahU\dDdSwl.řq/࿖SN'Q=y)kyķm"#g#TXd:Vj+켰,"B&-/r+ 'ʘ +$Ӷ*&"*2cjBJ 0+T`8HIJQlV&պs`ũwN~.J]G]hworHv_0<'s TVVvy$ïJ8BXV c\0+  0aQ5O ƿƏɢU=eׅ3OgyY-g'%=^_ur%~EqnӔGN/'WW%j?p7zF4U40||foy:̆5eUۨՇ0*χ<^Rov$8!82G׫g=_^`3M7=fvB ~3!y=\_WhB$Njw_? -oJ_-&޽*5,[$hܨ// {5|liMUQr6+e ڻMNJEn g~UNJ7]?<'7^s?/@ޅ$glxR1er]N-"CnoL-m_ ّ~Wf^5.U56ىTcbfDޟ s5ūܷɘv|۩=&iqff`gNJB0W\)9G+0TfE2ZYVs>?` Dag"ַ灀۳kq|/QuEgƏ{|JXA`T@ `pȯ> ogc:(U@i%nV`e:)]| ؏a\O}֠N?S $oYt Ng9I"OgG)j:fSܻ"y9.UNsV=#◶eˣ XlKEyl =x/P9srF JBn FXf(\)'T68kf$yq :[ӂޚٵ$7gٜV@*X"\-7nUpA oKMcmK-y`rLmwiNG w/m .4ʯn*Ww=*Ĩ=CBX9ˇsl)HjYΌRG!gi`Kz$BvGDi̠1ǧE,j)God)UnEV<勬C,ǔaugTIL.Eh znVdY ctUMj(H[: (<<~9l#>Oe'ިϘNzASh!+eU6lil%S>H4d6J4j q(/hOs,glq? 
R i@S &b39fL bDRDYX--<:K!IA5U uZn|:c 2s+uK٭ ;1hZOS6iz*D6W_ocic%KUm[;=3't*U_ h$cLsɾ |![ lF$5|'<>a&.qvڭJOY .\٤>9Ϳ߳?^I8ꖺ:VDWs-ZKPhY%eIH YsUNq:r> H*xs4.q{ͩ)@\bsB%LvQ#SL:)O .+3ӝ] ˱:{7'A-K胲N)΢k,tEMuϸgnӵwT%]Im04Ӂ^՗Qȇ|/`azh}f|@q}0A2=dz>1o)=RehQ]5$bN`*qf+VYNrO4 K 4Zj S8S)k1M, F{װ{^˫פW\[zַEYW Nm`=#y:,u1 $w>7" Tc2V _HhZzw}-=EwK輗j~Q"c@'ƒ1xpRI1"Q%aO ʋm$n4/&9X#i"EтD8(B+u-V"hb+0NFeM{ FrFq1gȉ 3Zέ,wV%˹q$8,g>qM&'ǵ U5}<)8-xzv<?U1k)y5_{G,ø"/1&0-%mbDU+44-KCe^ŁFiffɢ7yqS{ғAhCj6:ziwxπ"7dv"~lȃsIT YF1aV+&(P};Z7ve[׋'w OJcHlc,K/O_IB <^򻳋l,%ui PCND ^uFnpyù_ǹtFE=r~uj}KL_nu~rǴ3A/Mש/ߣA7 ;-( Q:Xߊg%!P"i JBl~i_D>M d$-&¿Y:x 3^MY@Xjܐu# &4Oel5(|3=>Vr ɵvܳM6i9mJDN[;.]J;\]7 rZ 첕Siiil圵rM];b!tvߙMMnL[))r 5ڽKFv[sz!/1:}߄W>6q(':+*c#V\\yMM'{RrC 2 }8dUpIyS0I&Ul4f'l6}$X6p Ic}H`Q(XJýA-1/)4 k= ]rfi|Q#T!F)c #,'@2$):A6&R vpN2FܖciQ8u0MLΧ6<0N h,0V9\2N-相~w ?^ѿ&'}>=̐7:G?,ӏ<<4Du7aa۫sqGuѸgi|#*(/|\>Еp9j;u ]m#9ۡl]-JtsCtE+ȕ+tUB骠D ҕ@%u%U: COW3J;DW]u Z m+BitutUoU|s=ٹjl;]m;;իrpCtE%W`W誠]tutExK.%ݮO74gMfፎ( wAZsbÛ7g\MooEPʟ3f.yJ ')iXJ JȕYzv95V~8kܧ#Q5ݗl} r]O/iV=l0M朒NVde6ԣ"'  .F-`L }P+@ޓ<v)9GV<ɳEwiѣ<WCW<ʂڶ{JQ:Ft3tUJ*hꃷNVZUlzޙhV JOoWeE]C;7/Ӓ.րJR}bUmߦu?doݫd, B孲Rp>6Oޤ?ǝXT2 X&߿߆?w?.'edžNgD"g4?\U2tG%3 !TޚQ91O`tF-&T1E/=O>3 ٠lRpC:.`(pegޣ]RGNr]Tbe T$$*烮X)VG8$W$?LErn]U$ߩT|',HZy9eA0y\R ǨA%gxId-fl6rcAg$dA)(1ŌaElBV2CI@OfHYrcrYFm%*@IH2 5[ h{dn-h x}A4,˅bYK1_=@S:Z#\H ̆HUYdd吼HQ*.=à(5b7Y#Y΂R  \ ))h؈ވgបc5Quoui|}uҎ羴Q&t-iS.;m' 󲋺.f~K﻾仚\fS)m^?sMJCKf56 4) W՞:s~+841*R!G)JBl![os2ɗIv9:o YΌRjTzr$R uQ9`yughХ(x*18UT o9mNvw:A kra|H~ ^h叨߶,}4&] N'1+.#аCVʪm!2Ҽ |hUҜ쐓F F 76w/%&@I.-yEhVs [J{,9$DLD"gs޵q$e/v~?] 8pgHbk_ _9$E <-S3GUZB: S΢YF-n<"}`X`7!T,=utb$*qRI.P3o[H 3wdDHVenQy4_U A1)* QԒe r0O>i=.#gyyl !׺^6}hlB[2ԛȬLmO <4!$-PQ 2] \{oEH\1B/2FZ5px5kHB$J")!d`$PO1&$L+Mxjp5réRK>Gb.x9Õ9:t.v\p:iW 7 `o=y&#ȴ694kݓ!zINz<*,^x>@h }*E 21|k0=zQv5`-%Ik pZ)C*mSR$*ɭ5cv+昨= Dy:{ö {LPn?ܣX?\7=MGenp3'% h/gJƜ$7\H )(gh@ >ڦSSU`CFe)rv*7ĥ31hLA#UF Xc,:"<˱e8p!p"O U`Up3)`H Rz\hS8ETX`)ʥ6w5-0NC>=JT*q2~xmQ!E=7D q^;ͤ4lp ܍g_H拵Pa sdcyÜ }vkK8/9@3KZN=ed4y-#港twqGmLÝjQ$@c%jN$#4:y #)X{渕4q_FYy<8o{5[teZ  IeE- oO r!B`Jqf&dᆑsS퇙l*Uvא< 6T82XsB@@q9;?sN'{,?ֿ)|Ԏec;+,ŧ"LYڲ'ʂ0H9s`\Yo["Vsڃ*2õ`$HLTV\y&R~5p8B+5W D5m-ꫤ:ţ f M}*M.kTJԌV]ﷸO\5}cB/iQgZ~߼^|w;Z•Q2ɟPH^?ᄏǏw7pBΜDO&ק@gMO7-i[Mc{˦4-bi׶v]8[nb%B { f+9Ҍ bÁhPzUn}չx@|pSfb Uuf(fT؅z]5?Yx^mW8lzji.,zjLyv>t nhnnL¨.*VDVpF@5)yY Di4ؒd%amv. C.9KJQTcbʡ8rE9 ,"R6tlqOs*{*{&X8! 
V_v|KRd=Q"pP19uуu*a&8攠2*-=T!$2?K?Ep,#h%7FPeZZfϨaZ)-Ka7QnnQܺd6llx \5 d#))νb޲/M)"5"{ꠌN1&I9ZDT2=9:"W UhI$x4*Q -{E \ry-a`UU]c!mY DX_1rs{?>aB>y0/kx_x E t5 _W"_O޺ٻۄC1G?xz$qxONc_F._p|Q+i]&)x=va|QQGr+ #g*ϾW?/A,dPzpHw8tQaszc]jX-9pdBڭuQ\?1E]/]6ۢt46PǢ/(>̙8+p5wt ']@|rz4?FkW[?'7+$#*jK/V11|lzh뎇U.U?:s͒IO sw~ NN4Cr礃ZHe0 >&eLp%!̶j@yTJpB&S#5i`*Qfs3DbKĶlh}ҧޅ7Owզԥx#1k d,;rr=7N<:2z{Zk1{~gZȑҧcp+mrٺ+bYTH^4QJ9ָdb@@?V >f-R:be.RP |Y%A*/x%#!L$Jb\REr 䓍@Z_(՜S`21%$Hd©h,wA 5G:#~`8 ) p;w]Siʚ||YANnZ'O~[b;:uiMN?5 7+)QrK1:wBh< OU}<|t-QsC 3#iw!ִDU%D+AR 9' !xw&=,|#ojFf7?P2tQR.1nNdGR ^x&'_fuoVڢԺ)hIiKѯ㳳YEѿ|콗swCGq3>9_F߉2Mk^oYPóGz3D鉖fٓZ }^<[:ȧo*3o#W8OֽfYx;hwMfkW 7b&+Q?x?娯m"/ZC͊x|n5.t?U7Tqg״`VAv[׵fmuSz]+rgp&ImۤdG%MKG>W[%7ņwŏZ!IrsLY}z$MU{2_}M?Ev;v#4]* llI źMgl2/PMeb&;n_Е.v mwD#XJ^Wϵ'<6.H4VmLN (J>bU^!.u7~ylѠϞ8GupI5K&\c,E9fC>D-SaRTBUDk~2^'W90f/3ꅽǪ>gN0wgNf( 7#R ϓ0`\bnxgt,dVѠ1Ap8Q༷!qb-:0)y.JkS{]r{ TϞyiu`T7OnX^>Zu%nwt_ݿ낏/m> Z\B ^h m.6Ze).T;pyZnsOpCE'ҙZۭz]虴#GӎLWO:PrX}UE);hJ9§VɔJ#b@$P*ˠ JDBX2lI򤜣!UD'"'53ʷue˼Ws\>u)|.UI iБ k&fT&p$Lɐ"TN i޹4޴F7G.ԋ +Z+voɾ1httzNH=cYI<+$17Fh0H*$-YBjV#dHrq*M*MGax(ULlV!BmH,>ԝqb,&z B̲[\#(}TK5˹,QGS(_L*]눝s\A>if{mno߱=m =Oyoaϩq+`7A|&iX`CUϴg9T3&2ONuA,E}A->?PԺI?'_v!uIzt4fK-s1;+p|ݨ5L"nT49EK)k ބ[9k+,h573\ m6PW?dƾ}|6 ?zU[)lj-U/y-6_oFI߶YoN.O/XL謽9Ng_0O?f&_{4󈟽s,qz82,I{Ռ˓\n=HXGpU%@ \Ur;\U*Ip **N \1NU`W)jJׄro઒\Uj;\U*•QWI[ \>ƿ-w"#&T8Myfx-#~OZgPt|nR3t@,ksF 1E;|-ö0h4Vg^ŘϷQV?=%#i/,l\S@Cf _^Y VWU-}ca臕vy6<3p$5WFFijxL1n$c F8v푖Q 6Sɥ2Z'LߵJjAx1ZuNJGpU vvo7{8Z=\U*Wz1p(m`UJ W$]2N;n4G3?V-3˼wlOU}w; $+B M` R1y˴ s6!Y2v_:+wVrf2sql4T.?}E~v#݇٠Fi)HEH-B`Rq"E=hRJ9y7OV47vQW`ix| ܸ:ׇ7z8Ef/3\zxcG#ϝϭ ]>?%@*8|Q!Q2D"b@$Jmְ Qv;wv=Kؿ\ )_]B j0U~. ?c3.eb:ח/|Ϳl+>:nyorKy6nCs+b&ntUZC㴳 *+k ;f|S,9*C{+XmEˋ\g(4B\Si6)@^|^A`.]B˨/t&D ,DRhyPr'OV6)cu$^t]ae%6n:PEb2'INtHOjthBPI K0 1lEˋ8= Hy@!(ޓ کiBhlI LI|`A/L|g@Y8Yf]][oɱ+yIrR}3z'1J()_g)qմ(wufuuOUUQ LsLR Vf#³(&n-B6 =- i|Ay}0J:|Ω=GP+&ǂf<~QX6ΞÙ,o[O?H~/?5;t%A d/y׿\q7y9:A>#KQG(w |寪8#gyg?[e1av6%gJrN?Yv,L>[|E-bbl$F:ޫQM`0XSz4'kySûUl3z~hxq-7s'o3/ޡ-'wƞ 枮a݀fUX q< g0b'bg}mWFdW]cV^,5;(>?NnYL 9=I{#QzO,?~y;ΚHe}lOaކ ,>29\/{D?sŷ Up<$??ן>GwzOoqyUu,<3x_ܿkAKu-t-J>CLJJٲ_jC>-q[?yP5`˓ 십Ѻ6$rǯFhYU!kC\$oȞY DI6opa.Uz~;FÑdO )'=^{ y"/i*vz,Qdآ=dۈ7t7ܠV _&a.^Jڋ1NΗ5kU؄ C5Rp޹**$TF ̊hXq(!`wAAR[fmqf6Їn_ZgZ^b _~E KeQPO;&jiO$ѨxI` "˞z:PP6o˸{OnGt<;|3q6'9]7C}ɼo!v+4E_S7S,{Ukjϣ@b͋Qz{e]+u+=T/RIV RoGV}r5,,8K\VAJ_T7]IY:>>RĘ1Ȥ=)"T  PC`M^ϞD\~IEn*F]Ș7K$ $S69kEO*J_/NOwrB;DʡJ]m?rɮe2[Tן ܴn6UfvPM36t '=k8kS>Vfk?b5YN:Sҳ3d)xE+l]azuմNst.8KRhĤE8V+:ޱI9{Q)wGKN&{iȒoENH.@)zt焴'(#,e,0GCtUImpD%e@.DoP+ -GpH㹂PDpJ /b^6ZP-7NVLN s[JvLnrn; ~8rTθ"^0a2kI<$Pع(縡3 @5RBrRF)C,PW% 6X`Tid,nd,b/XNXxP,\;͙f|3KÞGoTv4| gomα ùuHT>=@IHTר-. qElylwqaK A1T%EI L#x #v1vv#Êy,]L:Em}B5K#Vy\U`$I{*aPZ!.,e=0 h!\= hsYT 8ƜzXfe ꨬ-ٍQ Cx4WJǾh #>! FB ڦ$&LCF!-<>R B6hY.jONGVsg"mpdL*ʃ2θ༆(G=Z G3bqU;"r|ŤdO\d,.NxwDB0P`R8I a|G?I'*j}礃ʈ*h]H_! 
B)@t9O YnẂ¹\B3BA;笂Ӫ/ꏰ-zkQBq.n7|,/FI6I跮yH8x;CL`?iQCɐrI>-/C9k ƟD- K&g'M`3gxbSԿ7 hHZ}<;_xŶf}!aL>r:yփ${W}sދuj7s]z#ӧ#%ȡrey|];m1-R0%).%]tz]EwlAzHj/IdzHIt[~rI쀷J4{e2p[ת!9<E]{6%{)k8f5 bjWӬ#<wMU<##{tO 4١Z{M~ZMۇCCJo M)j@CH$T5e賆(8kBkmI͙+Cl ¸; |%4_LJ4vO8*w [DsA1߷]b xa2mf+޿Qvcu*3B7Oٙ6<0KmJmَ U-%1l-ޖbf5tdzVYxxq3%'nv\v%\ot؝e$MdU\2Rk&p izH*| 锴)AMs᠅b" u}sܕ!`O1_Sz >Jm4$\hː4sQ%o)8NWtQvz,kπ^V/q{B1cAs4?8\કL8N+=Vi+*$IC@I <hŜz#n^ $z=R]g$LBQ#Հ+_*0tT.gI +:!_JK"';=Y~"jnl_a-YMD0')4?{ײײ\>GVeھ'#Ȓq="^ռQo4hPGHV5Bq$;L`I^Q?O+`M H=ڹ",5Eɮ bPlˎ'.~j7Ƌ8V5p#:e9e@!RUYIs8*^L'6*~z1{~?&L j+vPkU+ԋbSbu6b_3E)hhM;k[.#پ4nOSm3GGJNŁe\rkP c ;I!؊Ȣ;X%uY |h@тͪ.\96}®|Ҙ}^_o9t)),Uq|,X%SZ`cԶlMf7f@o\fEf n:as b9k߼!$YߴXƤ<\ŸhqUDNS sVj֎Q5GWAIyr)C{:VJl#QDvW ?ǐ1b<ᡧ[zb-=iq'?8I9| +AL!!?y6kNH в#nʥD$)lIg>Δ-Ǚu8SgVLd9Rl x՗"U ƒV1Ebի-3S.5 &!K bjES8JL1Sc8s1qQeq ]nx Xϝ+7}9iR?0GaP3|T]P6F fW_ Jūw<#R_>[r֛֍S LXjf{j]VNZ >Z_LU,Rs9No4`vQ)[vu7;+?P//Z_jXJs"G2v;:k3{r`r.)&P?Ƴ1[xwm7#"c z~^ܠ~xtz̶̧k4:eۄt+=a5>5>9ꅈa5s -8j]cPbqz#j5xI upmЎ^+^q=ŝTfFO1FzXGBС\ǰ]"rk|\Iϳ3c37 ]+ǽw xyRs_85QuJYܢ=+j 8k)ߘ{mT8ʹ?s{viyy|BJ#w~KH ƑbcI_mG6V=Oeu"Sϳ xHBH{9:mHY}8⟫,ڴǥVc%R!7))Tb 6Ɇ nF/pr%޴Tb mޛ͍ .p.ڼ´YlZuf<Yٯ{'U*4ԉ"66/0gw{i 98+-V|J"/_Ɣ]=fG05׃Bd| ʔZjLO 6 P-3;-S :TkDnkOPfJ砵,6BTF4X{} J60.;[JQXce Aêϭw^SL;#ڇz8Eh!1?Avdmg=;>Uo/K^e8T^2BF0W1Y찪jh]5k,Qmi#\OdglO]g,xNjK]+,FQ dg{HtpNK=w-U;1 "s`Yklw!qL"jA;7EtivdɣX@4\VDK K\Ly<EPTR :kNǎ:]:8$8Jj @h @yKlc+a'xlyvpl ƧX˜0d] ą%X6nL73ƪ*QᗽJ`T-YaM`eu6"K1^lE c)ݲ6 ѿaU>C=ZSLK ۱O[UBy꠩౲K*$#*8 7X*qIK =Z/E^SJ>X@,b3xh- #x..)g  c$iMc ךFbj3X6WE e#XGd \ +{qMv` %BA1nVXӢ;K4 0#|US8()Rv>U,Ff]WZu/J*"Njmb/`WTJ2S)BbP-B1@qhQֻ Qhv6.9QfXZtQe@i<ϔ!=!݅WX42f1EUJʦhsD>!R \n:mV28Ut)\]h)7VS dPg r6V[/5zPr ҈H1d"eU2* U Lt~XeGƁ6S@^\@$6X( AJt+S{H :JG[]ƂQ,TG''XiVl2JR$ c4>ؽ3eoobTnDl \*:@QR"d2;%tMކFL3Eh1H\\΀B.ds2V53DD#+-5* ΃\@Zi29 qإ6F`$F8BiX0FaE^Y`Áq )dgZ0]0Q,^gi&0 ֨ `f)4ͫ*m"Zz7*-IYy]$ymue ak C:[j:8lE?Hc<[`tZLeiٴci$V.KD0vvnj!f=06:7  kށfh.xqKsyHY5X`4Ƹ'o|7G _6D,T@s1#%%ƹrTDKhLT@=BBЁJ$5@X2r1طksڌ:fC1IĪXU?]|3RLȎYY 4*%\FCd]Xr준S$diZ X~5H;gw8^m#:ō"N;U+jW޼|֟ {!M]Z}Q9t@-+5즦~b-{~Epp)J $֡Q-ݮ+`;NzJ VI DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@zJ 2m;(`{VVzKJrNrOJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%U!3OJ lH (y%oKzJ @#'%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R* [Fih[6l3Qڕڵi@t۷u%v/#`|0s?|wp6;_^Xeqqv{9!s_?~묞IP+IaK?L6\.e6|>>l~Ӌ=-gGٴKdEF9<&Kjş\|w8rjL~tP ba2vJx!N*Zȱ/= /']7> >dg5Fo v^1 T:֠j'Y>N>{\_eX(v*x:׮ B)RSϾl9亓}*dV.9'}L@H #{nKYvDX)&[=;"3ɞILg=d${&3ɞILg=d${&3ɞILg=d${&3ɞILg=d${&3ɞILg=d${&3ɞILg=Ȟolo_mikulo_mڝJɋ4?CU> `S#\^p `$\z ¥k]oۣp/wO`V|4[4(\p9ߣp nod+a7+pÕ\ ه}2,lц% >lJ>#|x4֔Ӵ۴_gg~Z0-̭ Sr)m;<NSl\%67kܬJRl6zE$ؾ<}*?\MaVH͙T)LJѻ\35;n4cm4$gKuYN9gCaů6ÿ庥o&|yr~|YXl6>:ݬDj?S㗷mTOo።WWostVyĵNaCOwqb=DXwjz5_w}6-UfNyfab׫*ety)e];yM=^ff.L/NNYס ?] U><k5%ưO?s8U=MɆ׷e]/b cq߿RFW\lM-I7{tr6ҥrYJizU}ۇxuqhʘlkoz;$7li˪02zMw?g'vܪ16V-a ]g^O ɝ[V>ٯ5[>f߸bdͦkNV[-Z^p""d-)sPҚt۷O>C>vVS9UmڒA?}SՋwܸw)/nd~oVtYE}n߷"m`«#cf:PrƟy4]@Ƿ`Nx)lu4pNv*E* P!t5sJ}sa[3O|ʃ5Ɵ7m)U| x &ic*V q4+ NQ9gĩ̹N]B;փјiIw6[`dJ ~M>6=O_7&}(xGrr.;iAUe_5c5 {s{$JGsًA\l=_r$`rŴOLoH_>TCS'|v=bc At2 |`. #mۺ%B-%q:$1>ĵezpZ^\5^rC]_B\=nA᡼7_Ç >jG#SߓѰӵښZ=k+(k8z%eI&8bvR.Qzn)s:*UҾ?#.]jŒ;ک|u]dGB]7qX+$]IR "/ \xg |VUFK5kqjiv5efbﻦ0}]~WmCdxzrIӐRdzYVyt6sc̬eI L ]/Vs0K(n. 
(` ( (H-QJ; QgYbZ%IEHZ]޶!:F]G3灻bH ?wi;gO+#n6ol%7t~)wxy*}/6X ]vVTLbۡ<"mN*ʒe qpPu\ '7˜=V lv^6[z;Um`ph+Yy%^g0wuK<\^Z#SVnم/xY^K%^Ia=r.!kpw)tg}cj + 1N):f*f^h!_d3{nMcҎEYj;p:kgPL $u;VNj,<*_hϗWqy{C>?_ح\i앯ʳ,Kk#=wf b^;GǫuV"n~gkR_iUWbՅu"G3' Ȏd%m +ܮ'Olj4.,hJ:83Jg\շYa1r;;tOZ˷!gu5Y}QQz+d2Qf8T|J!qy`WVTZ(Z.#,FubnU&l, }X)4I !,r JN!-{͜xVvӝyG~u=Du]:Xi**g)x qW⮊I^bEΎVXD׎蔴`N%gMλ*_Zfd˛en+\VnTuäj,TY؊wKEx}u4̻S2Up9sa5d\.f{Lcm]J7fH6WbDBs\3ch'0cWe$rfd͜:WMgm:CV/PYr)W_t4IG?PoPd:9𯪐Y0!U\"0\j4E)XYAFM߁w}QڬtmRV^^{t鼯0PnW*29N9ڥs-]o_WYED`MɯU!,bĀy䝭ŗ́!C8}>.8Oٸ]Y :eG/Ef(`Yz(!Fa|FTLRA6TzG}ho\-ؘNY#mAJ1Sۋ(`o-ډ6"ݕ)Y2FO/FjUSȤ2ZkJDdpw{:K3VL ZUVg__TN|i/yt[֋-/U:ѿft&?yVu'p/TFN#Gt%hQ'%P"K$J]fӝPl23 ".2/۞~R]t6$^UxؐlϺ:ٝB}J.]/w>Z䯗tޓ:YMPkORoc[wܺں[=/dӫ6;O;Һۖ|YfZnZlsw^?>2?Y ,~uG[i\So9㯚{Ƽ!m2OVPys t ͭYnm89zBꄝ~2t{*x(\PY,kٕB8¸糋u9o7@At;SX<\0xN#ȃq@\ Rt@5O\G1H&kJREhjlU Ck@{-w) I#+sLƣNZᨱ6nE|rɝro=Sv^?/yRʼn8S|"+)T)" uII:,mI%p:&JI` wRɕdQ5 Mg72*Ͱd mc,+8|_Vf=fg46/A×M~`5nRF;`Y>}zIf F6LdЙ-.KXS"r /C5Bb631,dN|) #vF85PPt݀}HjCz6l (Ϙ:yj(D*KITg)`DYedBMQRX襴Qc`,%v6N(7:iY]gdOi GuS!T|Q&K*Fp%&ӲA*9 BjVLZ:2hn3\ԄBN=;RbSITS(wԗ˼9E&V%]4Q9K5 (\ eh+H=q(!䑂MFaҪ/tžNy HKs5:[?Ti-N3O>Tq&+inLS!1 &͚m OLp-BbٕHIX6]}&zv_!oRGxK^USoݳ )g2%NJHᝥnq`GĎfǎn|(G}Gl@rXc;' TE]: G>9‡&G`䈘il C1 (ceP %L& Ny#&^EJ m."{tO*b*@(I9̉P#19j61b^ ɇ (&UJ=ä WUюW7i|Мf臻YR0N[u Ä>2A*ޔ I[!L%IB Dy4˧鍆 aMvxr0x(aoP4H=c"u4r#Eʝ:$^ vI BP VPuzu[Q*JlU Ya9T(A:b;>`Ad4 e\#()xТu}Ai )9*`yުͦ#2Bw79ToeX[<߳6zpŗOCHpbߒb>TO`O`O },҃?aA%YVdR~??-h&Y;TP uY!lc{න=dx J>OFr+ /qx9y 6-DǏfCEa" Jg#vc  Vf+TP("JƠr`Q}ZC)SYTϵ\EƾfٳE Fs);vW1>df,Uߟv TVw~{SIߣ{7 `R,öHhIXr"kXsIxrQ)g!)8DD|hi`!ZgDR4Ăɉ'J(Ɂ`$H-Jj-iZ2)p;lO#0c&O'{՘nneʆr yƆMuEFB2A(Vi!9iSe=2AW^D>sW^O=g@<&hrk *>'"4HK*D}b܉h)3ړ Th\x ##ilR bHKP^QwmZW4aK3ݻ}dfw7; @nɕdɝ{0Kh1M/s,c,9*ڲ,woևN6-v}. 2$QԵgw^~Y HϿȎ. Nxu]Z|LB61Bl91=vh c1ʃ :XE?,ztNfWtk-p`wG|LR2>L=f X{^¡3?Q ~vWfG~O갥e,궑ؑvda1}$/T:Qw zەrk'3$o^KŢLad79k^j4ߕ٪;vS5y0^~6E>9W=*ˎda9g޺?|/}=Qy=ZvcGFmV+?-uxsZxGBMB+ӐRέ@]R?j2Loj VdFd*`Z~11R{Yzq/&חLۅgU7woD<Oy86€~ x5zDՇAwr2LN ,ƧjX94+%Qq=t]iْ,Sg\ĺk,{,׮?X 5[Ϻ}X.>OgGgRܤWL@tBk|;Q 8yJ|iH**Aocx;I2o=oAO?#Ҙ6vpa3Jv#FeSɵ:PnlӝH%7NuGS 6DϱhW"*5I=PesWǃ+W+&H:w\Jx:"\IgMZ_PKVWh'qfSR|PK1(tR<>^Tib J@]|ߵh)7CE=ZŇff~>|h|G!Z-m}Rk}Z§ . cahi:oˍ9agWr-` wLJGi9Em`WqRWG+9YW(d+D6"N0C׃+gՋ `'m]aqNSo WmTְX{+WbCW(X Htw\J'\!QFp% HƺBNɾTp,J0?~.>橏u︒ #JsrI2j P-ޏ]ʾ'5Ypr W$ױ\p~6c":B\ՇFlh{: K}U'ҢjnXڱ'3m3E" }(x>$d.Vս2Hp0NH3 u"\'?!"0L#SR+P.p H->Ta q!3 6̐\MK|ک\=^pƵ:|Q+:\~౫vj偬v*gcW䀫V`2'\`gU+R"f1J2cW(9U6\Z{T:=qka3 v<\\U."{Tj3q2 v\g+{۩w\J%\!4ͭ+T6Be+R+dqE*0vu6xT¸ƸvϓjYI=-k4'bY`f3<U%C%<-ڲcnX'h-fjmvo9l|3+Y.JB7;B Jg+,m>q(X6qPSٷc\= 0$|mu պC8nr2p{V=0> `dN.?py;ZN%l qh+pЪ[ H7lVjOv*pu_: v 4\pEj5vNWýS +%.\Z'+T #{,x>"+Rď40qhP; 6f+ku.Bzo]J\!tL6au4Cwh9@3SDrmj5~Tmmkکt=W-p\=aֱp H㫀TJ>q%Ϻ"&\CRvJ :B\IcɺIW$d+R?Wp1qi 6e+m."joo3Wj!#\`ɵ"\ZDqE*puұA}d$*xh`Ck 'Q;.#zY޸~ld02O^-g~+W"mp͋y=}\_:?GȊePyYRno%\|ҿEE8;ر˛^E&X?QMne.wj OUuKor6"^]y[xk՛d~/ QϳjI'%7OsOP7nRi*^?>iHcK'OR`8$2[<]toW#-U3|*bu}: 5k. @:J,HX8DʀB+/|b%7%WP E}4J{`9TT/eF@&"+uVnorz5KY|T{=P/lZk8ۤsZM7UF2ͮ%k?;Eͧ 5n|nOe^|OoWWʇ~wޅb.Rŝj Wu/7\龯558J|PJ)xeK,N0V"vyʳwѮ\ȧۛ}`?߼،mz3,br̪8/._Z6fYlMd#}c8S*$ƣCbBgƅ9a;OrWo8SBy35l1csxbnܼ4:C^_’bt..d1}m,W3[c<ʿiݚFB+Ŭ2B>Eq\v  l,L>}{[*4G [@iQ1}E6xo[׳iYmܛ~ׅ>}۫Olz(-;e +X0L[ R䂳}ؗUd`F tZte1m"T>H1ӤTiq. 
Pu;kmH, / -K]Ɲm.nOV~||XTX[>љr]W7p'N'\7Y-j>{Jnjp4~9Mc/4Y/M`Zȴe<'xl1M.d2tN'E8a:`:V2RPNp)@x?olz>+`8'1|q6$ZC7ΦmӾF4mi3k>z5t?,Ɛ|)qs-22 M XȄt,Yʨ5H`J2V>hN VVFϳ': LΗV4+}(Eԧu8/c{\g M蠝Sm[S_޷Ag}sZ@؆MJ"&J V+#sR0-e@Ή@myՌ0 `8Jc5+!%%Ѳli68P\p 8JdPF9tR3)c9`;;;rӂz|8rW䶧V?UdEiy:&VWx)s_2+-dV'8@[lRR9V n3-„G{DFS$H!*:_KhId,] )2X\Py:!>\PלLOɑx' ) !Aҩ`D RecPUіe ^3>tgɑ"OIp=7 d-r,n_[dȎd+=Òd۞^cŮfXe݄:{( 'ԃ@Qmf7 RE(⢻CЉfvkoDTA b `r!1xv߄ٱ.ySC xc\y;c=Ǔ|r{86d>]~to`0og$ѹߕH:/)3ۆ-cDǐn^2{~n?goG׳4})Ac;V\{1l%%(y;ͺnd;4iosb~uK~5nѼ,*0{_/d fW'?h0>t9:Ƶާdv}V}iC;W-aH;ϏwQwI6/׎Bk%r'F*Xzc,m_on0<7}+ভlc֫ X/0IGڨGn/"$_zlcsHhqKKmDݸ銑C3igg?r";Oپ4…̾ͮӧ"-in&ىe*X]!т]tl235{?/VqƓ&;^{8g0&Kl>M`QFKba;ăN]/Bs ~c84yvRfNOi-Zo, y&UIG%EtȳUٕPdƢ΃[[}֯!^_PM ?sdd| `c T2',-zV 05^sYDH`9EʢWf fouR:͍ 7;񽞐v'[q@9v_2.Zfwq-T<' ẽ-y~t!ųdNKG$>a)uz] ߯JNq)ֽu<>a\\]':(`3~?XWcG!He<9ĄNC܉ 4ry>X!E$z5+ =Aw@`$M``v}v0DaS2X =(#x ݩ 6ƈU ydtP^&wEt J~3}ܜ꠱GhGV'eѩ΄ u``dc`nfE*FK $) c aERs +G&~\Ult{"ȾsZ\#ˈݻk#-;n<[lҘ˩C/*Z{y@(:d_cvCc`**6[2 0: &5BȃV\.3H& ,E0J+cDb u1SW<^Z!qDNiLd%dc I$cB;YYϪsz2 eD-OKQrL ` V(1'Ge *9yq9^&*PS>XXy' [I *I)J)[$`AP}.MU jE? ~] HM)(>/R5dQ__Nmh4,6Z] )zʍRL7d | t_ qT̓ǣ;ҷv3ӣ3S)qNB#JP&U45њ%ɞN" uI2H,'i@'9GXCגּ<ﶽ 76zs}h}HVze^Q[0,=&IQZF$ gdVU q3 <)ZW 򙘥ZY.Zj>!fV(Ix9m;̛,h4(?xÛy rɇ7?՟췛LbL6rIJ f'.'|/0uZi~Iݭ7$]^&I'ZHHճ֐n%Ko)ۛ?,xht?%ZCHť\ڹה%^i:wV&sخ ޏ~juێ惿_u-oP˳~s}3{_~͗ZF]uj ft%*ϴ.OG ov/.SUa-r$tx6]OkM fWWcW3zXhoE-s: zxA<^ &{í !p1d(CFAL1!p2O2lbU.rM$CN dDžұ$|q}42W9 ,n.!{jcמg+pn?W,ݢ]MuTg0fݛfbz<d~VŭFՅ*1JaI k,2jRHdubM lhfC +-% j40$)QI3YgY0Xu uڷn%;~ݾ]/oXR_| J=DbG.#ϑUYhHU ")20G1Ҳn.nf{ح?4Eptgݼe]T_v|qRCTZڐl s%B^KHpy !A!hI%$!Xv*rA]=RUBd3c`}[% YqO6:'Ng]Pb9D\Ws(ESva-q%:㷵OO+.t RP)o: ST>kE]ecARJuq (5UF,}5UQJ dbY!ZGӱjWT%΁ndДhJDGo*EM6,҂,Kt΢i1"ꐅܑQi M +߅P1 Lk 9dDa3RIK0;"iy*PY×V]Ly`4,4ѰL& Έy+-e} *pjT1k'BhH2JS "^xty\ 5=bui \VdRC"Kk,n5qéUO1GbG͌f՞epqGI19xB); WU]ss"~KlV|PgdЇ:1^Aͺ bڻkv]c; gBZHP9-,]׉JfZa6YSB&[p3ŚǚŚ)џXd!blfYe1J7R.x*"p*0JJp>!~^j5f_(=)dhF 0,cA;&d }i;Cb 1r6zj:㞆,P<{Y6F7dtp*# ì8U@2H sN[@WDB@K褝[45{aWē'EHFPBP}{,MwmH_i,~f3`wY7WBdI+əx_U$ˉeKv;jFfǪblW=w֝?8 K+2@ j*=Q6 ù3 ې^"ko >"3\ '>:F酣(UNZ!l@vc])D jFVJ+9K'.Uq]L^v@Otx>5:sP9-ߚGN(!)4Gz~k~Օb~?aU'9bů}W\qS7 Ĝʕj8h8N'C qB2QȄ)99׊sÉӥ]@'idO kSI%b:mpnv:o|~ OznN\LpBީ麘DuZ̯fhG%"x:I)QhP)2ZWmqLivvFaݘ[uZ.Z?k/͓^o*fw1[j0p\Mr{ӶΑb<x6_wƯ!JbzkIB]bbZ̦AxR "z2}jk_ꪒJVW%F1(5,A|4jIU mZL;37*0ΖŏWfisP|% TE\R@MfLyYԨ%*~_UHߚ*" 0H$yowߝ_ߞ}e_{̃~Y d< z/w/ZҶVEs#v)ZYeRm[*ٺpk 1=.@<]?^*Pcջ7-^6Dw8r!wN UX%z,g>Xē"ʁmlXKYvl"BKMJ%"r1ylԒˊ%V Nbie09,N||f{jA ))x𘂇}.GKe*UTV:Vgؘg G^&=>"%€/z:cW/~O޺ b.''f\?;&0<> 28gZ-Jx ELȰPg=V_b(S/^.VI&EPKsSr[GEJ%a{nY]*+?qMd$ETe#S«*yvE(-)pЙɦH]uPFW1V#LQqʀz#uQɢKTpx6UPj[03ƅIcP̅g[9+ԌO<=h&7MVH˹tDt}|z#M+፬u:R 00R۽J@3"#{&W:%f6jIIXUTM۷ܶaq01ڭIcYk_JKtUMj$:`*ǘIM(5U>]FX oO+:p$( O!΃YI:iZ<̇Q̆1!1vfm[XF-3g{# lSM*&+癔IT(1a`9*ОIUF8IҔG-X<F$<fBs`f}@kpv"AW{*rK֤䑼H̯ŞH !L( `B +YR+JxK55Sm K[>lM:ˇe>gûX h44XQFjd.-]H(7R3Վ]Pl<0]׈^h-%P2-R{Е]x9^]!\Mr+DӨt(9U=]!]qbW=t-7U,BVv8էe6+ /b,BBWVtJRŅȈ*B" P#USs 4TQX^P<W@QZ]܂-ZJ#UҖ:C|Q1Fn|\ \5jX_Y̯ e10NR6_ƞ,% ߿z!$pvJ-İEu2O`.%776" }q;Pm>Az#9Z_K +(H> /_~}MM8X#K76,VY!ۏfT|8Xc!\s2]2qZa~T2Lb+˲q9#t(Er>F :fDW3 ]!\ar+D;(mOW]^N,?y?Gv9({O+]鞮slFt2BJwF\OWCWKjiFte?JBW֘OHOWGHWkHFt̆$,BLt`=]!] Ad]\EѮRJ Ɉ\ uB`2}PuE J<-OѲȦ \^6bc_Ŕ{%H9pVŪdZWOS:Ρ-C*T l:"bs_j/"ۺm-6jIe+'Z2ÜL '"H-FAǥhYr6Rt߱-|$bԀm$ u ɕ%4لA!ZyWàLW9gJ ]!ZyB2?FҘ`KD6tpȅDzv愀 Ld`u=15·ZIJEWf2=]=멒u02B:#+1,#Vl J ]!Z#NWL3xki͈@W!_aO[tp̅-]+D)xOWGHWn&yFtS<B+Dٵ ]ICR6<c ]!ZENWR˞z#z{Q2eUnFwEhAd'q* u[t6BќVg jjr2]2&qZ6\B1.չt(ette,6'  n[tpe6+dVwއ(m]#]Y+).hYWۡ+cb5{{КiW {Е]OUs/ 6Bu8֨F2Үn>U׮=]!] 
"V,BBWV#+I9&NW1ҕ"[]`q {@Ұ5g؍\_wlĎ_Scwdb%BI5(YJܺ}^Y?PsOjY6;f J >aj$[^oM4&# +*mhm(5=h*x?6\ Ѳ/!ʮt!+kIҕCWr}uLOW0]}0OFWAJ1MѤyY>DrP;upLX_R(A̽KAN$:JV'~kxoE(ɿ_~jtP_7Mw~n*W(5\ou4ܨ/&3uBUJHo: g&_ ,7uaR:ۡdp1Mwl~1_ uw uq_[u0Rk.#⪧Ѽ~}Iw̪76T~Xv" 7ͲevSp'5#n9p|3N|^BE79r o+w y*cx: 8!%g~TocT&8Ι4U$SPD=W4(Y) qXro>gb 30+p, @o%T:j2+> Xh JE+Bo,jÕKJٻ8W&xd}B‡:rn(SB@+ߝo#s9`0Ȯz/r:Et}ߏdJ.c4PjMYϛ.R͚a¦i|0w: v"Vx?4ta}mB T-I%ۻlPEVq͙0 f-DzϽBMTDzo%[Zd$MԵeQLGYo3[@ATR4pF)KƢLCu(.e`0 +1 XjDcU91F<ٻ0F><3RHU"πQ.DIoO}"K'jSŕuY*/`TJwh'!T {ϛ4e7-GpNTG+Y7̶6E5ɍK6J3 |jԱ).ɇ5i709Fd{r5nj5o3|1!<6Z:[R@j%BRXoݸ7K -F`vѾXm6j!G]1dP둇 -U-X֠Dt(X(k! e 5P #o m${µ8bB>3z~`_ݡłTM%1М,4; zP0B\>y7zqsWokN9,ŗ*ַUu#D m3bka%a1=Cw / TxW2GH}1[Zm*d`f`yG a 0_(XX肺`z+R|&Q*L&zZ#t^Sʈ?2b1; ;]Ks,~OyLPBb2$kuk+<o 1t|!puaQ% 9ա5PXwNu2'rPPZC~{>/yg.o%Ͷ֛+h!/.D{qis{I/ߧחs 7pЛ{m{{;۽>{JR_} _{;+iwq}v?a.lS[5~ v6?s\Z/wt'M*NÙg U=nɮ#["3p(>Q@'DNIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ tI լ@Ȭ( 6& aO$ %$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIt@AO?d㘒@bXD$VOAL@',% $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@:$ @bXp& !J'(I`()$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@'hIL`yUw?ޮoor!Z5p2 .nRn-%Fḵsl$SgJ=b{G}Uz2`GGa]= ttdIWr++jFb;]=PBWCW DW j-th;vb6] ]q/ +K+F N8c֤SZ\Z:vbNuƆRQW ׬-V;t$tutm7VDW  z-tho3${WIWfw~n~7/|z(|ys^bnql2}D߼p\Q>\[MQɥ 5GNh7Z[?Swݹc~wBIhyz|NS4&*Mŕ4 sɔ3Ejvogyx3lS#y3%+w왲Sg/j}i,^(~M6:i|>ETL?dҊT빡p_QT HP\]1WCW 7dϧ+F"]%;upjp֫aFyJ?rKr~`gz\KWOCס-PEW3$E9tC3=U_goRdx彖Ъyl<Ƥ_n~޻0)P77+/jgHI(D<(@nnNg(20{쿕ekTRfy|-㬾Qǭ@V\ 5@iƋqs,5#@Ï8J/{/+h80xv^ۖm׺r6W~Oi'cqφ%uOh rr;lgȎMOx0zV?W#ͤLf /a볋[3*ZU|~p瑒HEp܈qV`\3ށA#ʥZĻmwlx~ٜ5u3SK3F~>)"*iE+*h0'Ltȩ@sAE?/GU9ܼ'~}7tߏFy%TS/hcf$/w~ŬƾCmux{3(Ѹ>tP̖|= sn96\msB뫄ۚyvzq3e,ߓ $̀fqsMϯe=_{]ݛ%/$t`/, żw/ϩR75g,i:a꼑*|7f=$Y`E^͠Χw|mwkWŵKnzTwoNR>W^6/-A7<]Scwo?”b-;Gtz..P0vY4a! Z{1@?X@Uv^LbIpi/ͺSÛ`mkt}K#!4'-Wa؛B~SPoCJ[jGO| MjW쬙WeSWJr?,Ճ¼Bchuz \HbuTarw87+Ȭь5rQaT+? UL'7zq7@joÏRmS Z6l4ԛ7Q(b\qcW { fP'_k ;hU;O1Z;ӻZPINRf".{@Qt5ˋ L9𥖌&;n_cT&AAٔv :5?]>r/zWSyi|%݅izV(a.)0~NH% )$BwO3ǒ]X,!K&#`" GX?J?~뛫 *ݟb5K֑?M㖍hxQIAx3>LG eZ@1 D:#cJ`XG S,a1Ob``Kv\Ad_ܰoC7߽3"i 2ޔrf~ٝ\/Rql fG}ӛWG-yaѡFcLyjpE`VSGimmj20&l*K`>ѤNόz{dmHUD̴h!P`䯠_?*2FvX4B4+ptuX! ~|3]vZEɅm|oK<8918 _U]̱9ft_X4)2*g(^Λa򮜆Mv׋^ ZܨZ9O&튱IfI{;oSTV|ҡO*fPryvQyo7f) HYֺM{O"v)7d4 o]̏n؟QsGu" & " .x6h`H4;uif"z(/C`DʩR ˽)ױvvG &y0Y&v6Oי,ӡVXɽIq^`&ȰVGdP( rXg'S8G1t>2D5ON5@^FcD2Y+I+5eDDL ` Xy$ 7&"1LMqx2x;c{Ncз-3Huq<|}EVU6s߬Q ܟ l60 J%e1rb# ŴD ["0+"Èc(p,Rg2D'HkIY,-,xGI4 8\glh?+5m L]p2]^ZH|veu 5m)-y-?s`C- ET I9+,iop"\#K3'xF?AO+sCa#NI4Ȣ:xm,hE1D9=eY׎ItݸAw+<5#p\<1r<BMu騠hbP͝$YέFs +kyb`-κL-Z|vWqHdhR"E]Gv}]ّ߻+_ē׉9;yQ朊H8gi}}L#G7"|; NxIK'8vsg=<=Y_Dr=5||Jd>VPEzWf7A#nR[|+HH\d4n:˒)o{Oe_;O‡hYn"1Br.rmWpZ'x :ԹSٖ\;mulݫքh]2nAmei{؇oݩr}A96nՀR_w|<@[9ZJ"ද8K-Tu(~TS$k] J+m0"6˭вfyD@zo_g :{3v5SUGg2%Zo G6Fq+jd{_bBUYO}m[#.l]41թlG2Pӭ?)wM oک苛aNbJ }WVD v\㛯\ҙ' GuT;0RA;ܹ0HwsZbZc_VfR{큹񖰙N.?)8igqf=g췝$4TW aЁ FRAP *d5%^;7EuZ4Ͱzy\=;u4B??2[ܲQk<h>$` i EX: "(#h5ǂ#"'ldO]|z?elEJ54ojJQ D(}D$d/Tn= jh?zn~{)9 A` Kgb`YBYq1o=xg0NlO<;%މ= ߍ#"IcR .Hؖ5'ْ5N"ںp.г}!ζȾ={LE@,?\UKxl@Cxv<*0yXO;!:[4^̪`0\iGV{C:.ꀲtHMvl8ݹrQ4O/4ZkK)ƔaX63W >7qxQ=MtYK`1 aVLX65c[MbR%޻Pm ,cQJ) cf7'oƬǎ]gݹw!SnU%G '\dڦw;>& ɧi x#bۭ{n=::Bme4"iKMdw1͏.ToPOez9N9G{֦3tʻ+YėׂGsl 01P"-|& ]M.M'/O `/wB'SȐ6履rXS]ut BM.]lK3t9b ɄHJ;.V] 4F⭕e(16da8aeVsLӷ9UçVfg\|%gHg<Ȃ%PZɌl<itd,e*I}mnwl>b4cW#\M31WP 7$w t2bAgV׮/bgg0*aʎ BXoJdgu' qNjfͲ\;JlY1|] G߁ iSPFdՄHC>Y! M D*$ {B*uKp1Fi)E(΅tL2XBt!eBiڒ:Rԗ|Qdt[$C>V֢p|L&j &8o/'KtΗ r>0Xl s-vc%Y.(*mƾ3cal0nW\dxuGxUI'!ת^ۜ-`|猡&,ȲCڊ _Uu/v dԗ'! 
yRH9ENF2R8[|Y',也h𳐳wu8ؽB c1,8S2Xnsޑ9R5=|) n܄-P2&jÒֺ'٥1gӋdN#Uwi?b2+!PpL:FkR3SEy%= C.JaɍPR7!ԥrǢ0HJQETPL~PNKUNLIBI@U1٧17Nc9;]\I|7!S Udd,)z)0\FkzٞvJcLոy7l;&U-zś1~ ójS52[3ۓL l`Dlb .XWȒDłϒgUa+!Foh10u|il ;Ѵ[K[jYvZWR})l{#ʧ+s;eE.旚ym49Nq%A6ЗƐM7EYɨ{Pח\ kZ!d=fҖR Ǹʰ$>KjQ(lMkuqfm,U;σб~sŴC{Hw[(]&0hl5kT{ zk6^fӷX9u 5[hEM7:2 [ J)+Q((oQPFOԃMDAAņ(u[r[j0 )CIDri5GC(.cMq6gYԎA*JN*JBr6ecuKՏE-fkGC䢦P61b\"e6`JpuL uH5 R0VL7:3H-.mtN0Ga_{by bC,up<<@WL k#K(DPHIm-K5^DJK"H joXoZwԳ5{'Ѝ' 7le\9,7ݩ~qqz֞m8>KMXty?G9+|\8Vev?;2E+\ Lj k9MZ6I]Jq_9Nԗo#)VCJdeDaAFu$5' /S˜PXkHIug[qeةpò\b`8KΛ 7%22eº LH5*QT$. hH.d0.SKm2&촭RdB,=έ|y~[6n&{m 6e7^Zv1Z`1RU 1 AcZuCmmK 5$#R"Y `r XXu@RsWHɃkY&IJVW5^AxHζċ(ͭZ/i6lym2YLʠCy m!Vֿ~8}?͏6g/yɼ\aT|es47->__J6T;!0^}woo~ÛRW߱ y߲ F> >鿿M[U޲i>M.Gvv]AmtZ]mo9+>acX| f̗&ȒWrI_%r"ɲL-'XU]|Y|1xDcݻԃv;㣻lA1?4IP wN|b,+ VeħbPL+G17l1S/6G!.8 <dž<[$,q,rh]@) h |$sD9s夸׻3OW.VS'VsCwv^FE ߥU-myv, _2X:\rXfR㿯/Z)9!"U1wmh Z4zpEdઘkO]ۡURWOWEBGs7~Mk|Zl{;I)݅ 0Kq~fTʰ6?_^ҿLV|/=䅺QMV_`=2 ~IHntWZyrǵ~f|7Б71VL/?lzh))'R#f߂y ɕ׭"2b\2Be+FaؓZ1L0fOp2AQX)yWWD ;5b'W\Ov;dd ^\=xØL<;\=`%fsՃUf%ؒx\W?zo>ze-χܦGnW*/>{|-, EB ;$vb٘AH[|쌽xu!.[7ŧe6^ۦjV~ x>WHbtggH]ҏV*5B`%#SLBR#fAj9,K|` 2hMM7q&ݛTwoMߦ7q&ݛ{woMܽtJT<{[kMܽ7qݛTwo{woMܽ7q&ݛ{woMܽ7q&ݛ{wo{:Mܽ7qi;0z(ܽ->u. "f\!F-yːCbg.N& t!i.i}lc~σnȀٙgȞIkbkIoWoՇѧI?-obw~H@ܖ縐&?/K"a 灡/lb,6Ы୍bC6*Z&V[yCwG .&Jq^CZ OOA_4!MFWԮ̆ cƛ&8GvVd })oZC}Lz߉t]̏ƪAb3fj~vW_F Ʌa`G)wswKw4M?oz>wmW톜8[<-i4󧽦FRqZjc>,#à>9ŨFvƼv~Ke7KI &6 r,vEEepDP,j5s*(3ުlLe&Y6A"`0KLyRo)+l@?1dr'azzi<5tՓ =,מ#t8ע} B+ȬZт>wP8_YQX qM'+/bo뼸HDkzB|4r|"XZ5wZo߭;ʶBmKmޜwƮ?:ej.tͽ}ofs/tQp]ޭ.[ڳ.ȝͧ4t]ҕfwץ׏\"uG/E=g^wH-;j-unwzm|Kž=z^kfVNo}|y[X_|]ВUq寚1[jm{Yl|UwIʼx-ѡ,ôgZi㜉S.?A]ZB\OEȺXkЅ{+k{cŅsqggލǗc bd\p!d;قN9Jmùę_ d%C &fP!DVE ֹ.DB$~ v0n߸3ޣtB[/9AؔgшR̲YOeТ"jN!:P2m'F@4g#UL&IBODf&T̹~4v|99n<.GUqt|: ,E *[Xfc(b &tPl`UQc޳R\bTAO2RG]>:Eŕ(r(2TmdfXTjq(X l>_}*lI7/|_4x<<>_pvE1;YV% qt_ !ȓ Y*kDJE4R"0"#݆Lx l Y0jQ# hZC,hSs?b$CAjq(jڍ>% e {0h˒\:`*`'Z! ,0!R J'"I,(TtVF(NڹxX͜J<97_?ED[MCĆ;bS2L`0Ʉ@4Ns fVfz>$zԞpqJZg5/9e .nx_v4P2Q(Q3R{ x-dUU1pq(xX;C^axxx$_TV$LB(Ώz6jvn\UD)iXJSs'b0v9= v,Ly=XjPx7.t_Pu̓/ϧ^DP7>&WAeVCH#S2Vږ4y{&^ņ&^UXҗ\:(_?|(h:'`Q4[ fV:7*(hXÏ7#ɉ=ɾ{D2"-yFriDl+ճFq,upMk;aܺ(Z=14؎ĞUԳNFx$rPyѢPN#ޘC9"/VV*=^eF)D.h.)VB$1 !\{҂# 9Y !UUr6DߖyD}rKnV4dK|9f=9FSgF"K 7j#6~1#3ͥ"60楰YaM1f@~I̱2Nr~W3L2B,.mI0gEqV͜=J\ǕP¡CX`k@CN!`%ɩ`sqfyY6Q Cg%q$B ȩEIb.ehZ`< 7ժFxHoZmJ|$qLD,s6qX{zr,B7,Y'^He`3MtS8iHK'Nwڪd @6Wt_*Q&$*0h"Bir]#PE6B4M}.?PL.ךS4ݱΤ6< 26,AZrЕG~M)8~t}z/ZsO$J*!%9}V9 4ȭT{(PL`JKPuk99F_^ lND;d:o4~m<'zdsSΦqI݃#T]f:V-켰,"B&-/r! eȄi[\My`?) _xn5>~"jnw^3b|}nso=vP,|XDv/x 6tL4VEޕq$ٿR ̮ x4cgmal`Irt7)ˆFT$Ȯb!STY*3_̈ \%,tHXoaha]gŽxv%p|;YK=6QNfJ"lS AqS6{C6xFId<0uzϗ@A!DoV+L(95"VͼRz`8R4{Ekӯ鬫bMT!P =j+QTewdMzř"埑~^>ѺSK>?~Ňڄ-o 6uﲦXnv(֍>w0]ۀE3PJnֽǚr5[%0\_Wek 2 JC,HZ;-wM"^(!fN*d/DRrň8dgC?X=n&ǷX G͑Gr=r A#b$ܳ }Վ^GlgOC7`pg@} ( o#!O^O:Dc?43@C>a?}`UmI%A1:=q~NT61F iG"(86̣ "lq+2!QnZiQ1մVF# fЃV!'@zZ0BR i%3+.侮kj<BzC>/*S믝 p'yYf\ 0@U0e{MUwF1)lzC?{fg43G=|m1:|Úp\%ٺf81>T{TQT*HysTjS)!B~ D(.L㌹KKf~`ivba', yfCr$G ` ͍2YfIBc!x-.³8>7h|WV;)/^[d KD貞 GQ.$'E, *4okDunF ˩{kG[Mqv_oFo/'1{!2KE,&AԦ=K J80C#j|vjuM g?didjXR)Đ#\%H"[os2A]Q뜖pcL.՛;#(<y,N[/,f&9>'F( !]֬RkJA2|mR טzA! 
B8hYg08EP"+eU6 l^1|Ҁ)C 6rqsܵV\Q[>1fy낳} w.8D".s2E9r2ja>S@l×Z(,?ITHIm|8.Pg0o] 0a̖}lN5Uvs#t:3LuӁ\(lIgzsA#ي^_^0t UɌpėyV ?'`ywWI_*շ~TR} lYj( *@A."d.w ϘpH/2FSo w^AhO}L_0e yxρTOt}ėa4/a|K 4.ձʽfRrNh!b.qS ;!&_i]6]OM=ґ^ϝ!wCi<#u4s`[FEL1B4*Z驎UVmp5 9bΗVi ~Ŋ}*zb"1\b|4Obn ½wg=Un~8}"c=춋ݾ+Vk^oXG|.k޾f&Zh'p:R*lJ :hK37;TNJ#9iYב/7'a%Ko$A0S/u Wjr $iM \VJDA1AΙJy4$hcZ[+Y}֏I/}x/б&/W7EM?۲`ށJ^pԩKbYՠ_(1ML*Kx!_@, ^wy㗳716ρB5ȁB51B'B1Z(IZ{R2<&NLFS܆9ƿrxkt4$9\ٖJyW!f|1ͫ{ns _0.q(!>ĨTH҆GGJ\ <$hF\P@CKe;TYK\8 8}>LNj͘ 7Щr n<݃Yq^t~];U]Y,1ReYQ1h<կ: c$V:t)wQJ2#sT$יdB=w*jV{*CAyʁdUpIyS0TF-XK4IJ PGAEeƚg(SV^hmpS^n4IL.jӏN F)ziYM'c됱q*`&$"ӫѫjCDb#`ٞW4GF5=9t1͏0`DQ"BKM+[ZtȎ$㌵I]!`EZCWWӶmBVututDl]!`ך jtBvtutqͼoƗ=wNj/Ftdxl7x j2W(R ޛ4o,Wt=\Nzl[o2, ,PϘF z_MbE$ M1|y} cZttڮ_iߥ=?}w'"P*YqscZ`)4dJ(!Dxx{5S45Cf-v {,_W]>KͮJQK/3Kj)` s2Y&܃d1練ʶˊKX]ײZ',-Us >Qid Lr0yS ɥ*mbz,)+.(;QVqbwޚc fضgBu6C66Ctfk+ڴPg!=SIU_C[;Fʴ5tpm ]!Zxu(UGWĆU/N *vաzvh j+|i޵t%+ծUOL4+l[ jv( ˬn]`U]tpyk ƫ+D$;:⒒U/]!`B/:EWRtBc+!m¥+Di Q ҕ6]`Ik BR7殎_ z];4{=6wj{R'4_GWKuŨW4wR,{wP" [7*GC$(N0%1 *Ƙ#{Mh{ET'aA> nU 3|׫NU?j FF4>|OkVVMgv W6hLp\4?HFTėUo x0:޿Nŧ^QžQO<~7i~nW>_nCq&g~Jm|^R^o!*@g @lN2J)a8L3 N9+yq_WoAC0(1ݾVZ хcRq)t[RzYPEVI=ƘgpV,`z|&dwK\aYEYL&}F "V-[zð80O'[طwmmxj¥qS+JN*Sʞd@cRHJ~!EJJcy^ٞ` }9\zU@dї.׷/ ךjn}6I@l58oR>ɿ߾ .qO˷ˏ?'>,|#~3?,p'rmrrxլ <}}A^+S[OT7 ux|P)}J9C}Hc>Nx// dէAݑsVWJЎ4+J2|Ut1^OHqa2;CAW7wMJErmԁa[W>|U̝/_'"CԳWߥ7atPu4>dVsρYJѹkaK5sޖeHn ~B Xɹ4Xwc/neineie cSJn #Y6}6Lm6FaܓHR݌jMHӎ+H53si j}bΏA6LLg1 CЌI4w{K:ŀ9яf`IŘp@HEZ!em %'A̅ƒH-2>3ײ 춁Գh|h!zW/J]2 sy fH$ןgDLm}-zM6`2S#xHBh(ifD\hsd. [F8F(sƽb (`frV"d@26  4+kb΀Ò U d#"r,#@;4F E)V^nW0J=p.i9(`5z @ϜVNNjݢzyTZwPi"[5/ҩn41M^!a0 GLjfJ Ef%cA{ӎO/W=UOy^^Er.\&$YN٢ ctI01Dc&5ep:9qJ ̄@ʼne--jRݵSdGeNom}7Oܤ8Va8p82@M{82* oSc<̦U}ﶼhNU17KBpx#`I`gH A tg>B6WdPެގY}-!^PKh-8A>0$S0 bև8fzʼnfe+BRE=6+V^1&izs}Hݟjnmnކ倴GJ&|}іqauUZI-=Oo2-Jn[+Gё$#O9-?F}- |=9lx( |}bXãŎ)D7%c&W7oڵߵ1$hMN>1ִ;h) sH@FEoy^|ӹ3=d m=y! #:j,Qٱd4,Ur[U0+- Yzr:(oU#Sl %FV-%Fn{=Yp@P 82ڈ1<c\'-Puf&14frkwsz\_QYbv E1[&%Zrat!AJER 6ۚv.PkٙJs- M, E3gsM8F,061iݮy4B |m ǤQdUgjZ$we9k9{Y-W^PLO AXd>cN!Br*ih8sM4EM}-Q{(V+I%IЋMeA RrMrNC֠V^vi۞',~붗( 1+7HLVB޻ hBJox)+VG[y'ZN\wލY G&LSev:9Qв53- u/mYDm7TwxГ_kSk·x8'Hk^r-.iJ0b4: ԋG{cUt6CӲtz3) kgM㊨HUYwއ~]e; V׾Е^^ iR4"W W[[0UjૠAUq̳ݳOV' Fd9H-aYQZ  K4w9Dd#$Kk9| Kv!ko18B̢NnY-C֚w\!u)*(MƬnNr& v$Ϲ57Ѵ[ݸ8Б&'CG+lvߡpXY %/ , CKNaG\H H1d"C!τ$CvJq˭9ۧh%UqɌ@Q_a |jsŸLħo[ele' %BH dv:'08fѱ>x;۠WBhԤarKC+gŮ57/ol- Z#7?Nͤ/ZL:3u(T!'W@5Zɞ&{ tӆ\\B.#^!fgBk 襖>(T 脒ڞ"訃@d6qk .H}8,oM->;b~ynd_7B숕^4g"}Du鍙VFH+%c%zt#Y4!D:'a 8:rv\;OMNEo!m6+5FIߐPv[j9%x-zr;ðNH?}(G@݌q5?Z'OycZi4%[iA^HWS73槆Wх:"](_Ő|: mregtf( h9uCu>yv~]w]jlNAI'Z2+\9al91Tpk b:~ 7(a՝b511fR$ .'BH{^LUTJrfMY9؁mdFyԞ8q\I2D\WHZ ޶a[ٍRrlRWSlL m&f hqBY}vZ;K!"+sv4olD '9$(W!xm9$iLB9Cp1q>Ch;`[,ID赇7ZT!)d9G%t& ̑BNpG DKRl˗f(]XMu:V1>rnR S$ khN$$)Gn|pjuU=1(vK.c)iQ:c4(Q.Z"ŅV!MF'yGqŸUe6- mcPo|!sdVg^OM}RyEwm$ %#!:"bB?%Bp(EK6)QH$`tWNLQKP^ G|g%Tpwj"玬- †A4*4r{+Bx7Q!1w.fPRbTk*@=Ƙ14헻s?vxCδIw6`f^:;QO[4|ϖD'>ŽP͂.J4.Dx ʽfRr=0 B\U ;W8hrhsNf7{zPoLpTt"͞:YfZ^ځv?PJut+ zY+T̏4V܏5T`|m31)5 ³_[=V }36 .σ9Pޑ`[ |lSE~mpazFiN]-k6,W͸C6[.l;%%7ɘ,jOSU'h*(^YPgmIh }.E 23|d:X׷k W;MhIҚ I! 
0yau3Wޑi@Ai/m%.%s0JJh"+4ot-2⟃Q)NwҚ~?/²$ d,,_,l{1{\nݿr6sf&s?_grGP&FdeQo'NU Z-n' GmY`rk3j_L8XgQ~C 0:>GGPX|}>n]sOяVgZk9 (/R*^͊ąIY%193Zh+qX#(roHdBxzc}1rIw) }W/gxWj~dŪg PDRNDY p/1LOP|P~:(g p?W> (hjHXä=1rE+8hAX(DZ`dxA}=m㫝ASs~x(]S}tn'#VM}e!,t/c0RդL%:5~tvI\9B dd]GAMGLIlN"Ům 8"` ʌ1D=ت9ȳ@GHUxXiWLjW#Ҏh"MMjk0WU(/3 )/|5`q[Q-T Lu k`)dZ@6Z=r{5Il%; 3 d, ʇΑ22ӥγ\ r$QL}a6VƇ-Lw;#x|' $x,[aSL>w.e;4{r⸷_܉{>^STC,Ԗ H1G_Ѭ!>u1jTƾ޺9m^ⶭe}꽞̬Ɋ<731ȉcE:F4Ʊ?}4ZDK@AbiOV^Gm&5\|HdH%WCn>f a6$1hȺ )c,T"|zld*6$۹/=2ap*̆x $Nr໸H* 0`Jmg xK5V߯ztXrt`$=V}Vk-쥲`ruݚx5nW5+VG|:wϯWFYa-T+a5\jM4Nuh]WuE^*P">^|ϳDq%#+!w"8mov_HBdp5$ 6 *b|h:oo'ׇV[YM][9Ģtdfv}'>pаspbelDTRg%/֋e'Mmbj~vs5GN[9Z+/«K؇OŪJf?s/w1n>7*8;LxUQq9r\f8ߪܿWRM3}3w(u?^"Rf$cxPYkٛ_̪G\,W7\-}?a`oF/FUh3rqc>YVjҾ_{U/ώjc]Tc]Tc]Tc]/۫g }z*8j>_=+)g-gk\M:_*pO;Y\MݿcʒW}|溠O)sV-Q )> 8(U(}@LF)MTEqF6z`837ߗgժq`t{Uf3<ϵ,V5ό2Ş1AM;ZZ܇6jӸ:[i>d^]j-=M\5*Z9O"yg4'R=뼍m5[}pP.W%ߗBs0iĀQH4[:ZldCFҊ+4NIר{+,:K>hx*(p<]3{TUX"cȠLPJYd c sYWQJgH<$RS yp!rU,qkW>k(}ݽ`$`vND0_|Wu^0*U ZA]Z&!N`\[,&F\!z̦C@F,/ j⏽6:6٫tcP;?&/9x[-HSG"Ps ).4BEɼ05t}弲yS\f6(eA-(H_V嫼J>qZlTX|̋RRB2 \jacͅ:e#".C2INzzjK s2Ls1Et&97PrH UϷƸW[>飼,ǝ aP2|Xn7I=d4ԕe!mu_}m7qӍSLg8ًo\{_SP0<ֿn`ٷ~?{2BRCC4N.DQNȶ}XUUMSrƮw zdXfٕYW5/ߺ c29G0X"⧧pngթǻlfA^M.gT/ۥ+jy ŻI)?ѕW?|bdoly?p{gř IUƨ~*lF R*vo{~Ri"/ p*K#49u_eAttUH JY\VQRN8\< bJƉrDFYX<'qF j).n&g.™Lp,T|X,81rFwmI,4RzFذAl}J"Ҏ5DkRL}=i5ASՂJ5^ª=,[։RZ7Sr0|.wXeR-ϸϐ >O1l?cCB(+knWsk%jnUsy({b[|~p-59-ym[N{(yvUY-y-~2cX9n➻cw'3QYwvwZej|ǚ7߾Q{yvd~YpBgMc;y17xStQVpDXMtYbR[Oʵ;*RĪHVd\bc1'=*bƽxb3}e+D 3.io"zEώNٝs{:i2ٛ*k2/OխYmi  žnHFޥ@b- è?:J>8_"s# 6 Ƭړ,P]˕Ex@uzmQ0WAlSm/7ޖ;[Ski$ZCvR#VhUD [7l*{Ɛ+ ǟ]#Jx]kY1 _'WÀ*>H$rGrfm(ڤKszDTfE߳LI30\jgQf+UnԦvH$:Hn}P-a|С M)]O!)kN!SoEuc$ہ߼I}oHܗ};?g{1~ `EMw;3R* LKrDiҜY~/00G.& fp|(wyR,d?'ϟ{j>Z3yg. &BEIYn:kMbJ]_ju;.L24?cY|R9Kt-|")SkǏEuN?t_~+I>o_.|#\?%K?xQ_/^yգegw2@w\g?7<x{~}7:~f吢x*{C_dH[Bq//93XtC뒛ŁjW4ŠyEysjE鞆;qIډ?~=Ñ9/vӧ@L ǰ| +զ% 󷇦d9əs֋`j/%>k>p+|TLNMFt/VrW<\E6#k7͠jssz.vE<gބe>(3&tFO'Rg&GrH EH $IYiu۸I/<O?ˋ'#(y3 >?_=ŌQ+ # م_WR ҥ.&afO˧<- /p:Z8@J锟~4J*¦{'ϓIz/Dh4z4ׯ,0{7_c(M|`ҫץ-3|gL}淲ѳ_NjPq륳(yH6oK{O&;EFzq$<{P;gF}7EK-v@!L9[E'AH$Ck+>AƳR HN"whaѪݬi?) ~wYBZ7]fe,w: K`[<T !t B VRZY(җdLd ;i!FH"%_j\łyk9̏ss}PBH=h1c<= 4%tWZ1WDZp82@&C¨DHX2gaGŽEB%]W6|P3_gpV(5e ~FOSL. -(l}9RaҢ|!>@EOqZӖH%_rф<{M 箒;7=$Amu>&4~0yd RWַB'W-n5U]/(ue 4]2Ɍ7NG$) b*AF . /ЕQ["M{F Q2c2qyumڸ\sy`-U` 2㊴7hR!D=$rpXV#M)+[hXqva *փ//.F|8wc-)

P $xA}^F"4Z^`-) (&ᶥxq 3"Ҋ 8<8Ip2{xR0X!A!!!tAw$@I$iP#$x=XKPq!*$cl0^CnӮgj \JJ VFFrN¸05:6R#Ef:Q\94:ixL! :#Jxxy&wps\6L.IM䢡!K YjhRCCVmh)sPȁ |2,&´ TBY-qDl*F mKJ8i9q~Zfc4Abxe46mAiZ4B~ a52P4 t򩷓!c [0tƅ<&{qnǚB 6B >J͑$T3yk .M2 9Ǥ"/K65`DhQ=},5/Ja|jko\;oD_kmV땝}T犹;B{31䤺ޏn=ZʍSu뵮OխuSuP ;Btv#o?j<¼HA`SR S>0GT55"{ c 12oc!FvQеBaf'H$ ,jbyS0'Z]%w Zg E^U1wTD~#ifzg]:at}({cnWX({mw_qY_NMуeq$5LfTX%{$3ɩӠ$H $Aʛ74Fݼ30`jƥ?n\(׻uzVgD綔erUQT}((+AڕFϯ*M<>u})ȗOv1zXt uק9خOKIy 5 `Gjfq]fCEY,plA s!cXM1g IQU,9±TYDq42S 6M{Zy`82S9\Z+]j&1t8VGJXt:WS83̜1|xcA4[ArȘQ#.A-Q8S˚S֗۵ 2Ů)/ۂW r7"i$E ؙ)~̤YD`F)B:ݕZy8)Y,舄c,\vcUK4{YOP.,a_rP6L'scH`R0A.A3cb ;ㄷahN B 24LWx*Ւ 9n1U㵟-qO8)zïG-9"u>_/W-\NЮhcȐKz\.jQZI*r-ǵ MVBF }m ˮ(*!JoF{b(T}_/$~ۨ$quY#jg~Vƫ3*=W$|hQ2zGz_x|ëx!pag+FKm xAr LMkܗ6< ½A..DZ#hc OtX0<'txީ}#b2@5kPWCϊ#:4ẏAŽ I;N!}zw 8xi|~z!8u~roe>eFXܽ pJ^K{X"}l8,[K-27?x[{zbʌ?Fl] Z.1uG}43}-$er\,n IHXҙ a5$\t Gv^t SpNYkpz@)EU$sb4xH˽W[Ow$&PBF_Y]/WL_ssZ 3XS/CNllB 4aw.1Lz3I'=QCϟ>ǟ\DvGq!  A\ۅ]JD?fP}_3ڡ]T]MjfKy^+-:RWv)L}K矋U9"l̵-+ffZsRD8Y~PC>漩߅]J M۱z@o Cy5嬄WrĩŽںWP.oݷFPP4޾74R*`\TU#hˈ+&-5(ZXc-DWT}jW7Tt<>4Kc?KV?dGa2zu8zͅu;McI9Mcԡ_l4+/:_qBF(e$U(Vdb:e8*X:0ǹ!pQ({5}yɐk@22,ǁ9ٮiL*{^67.N$x$-˺ӦBPU)?|jvRyq:Y4c ޔΰwhGY)^t>COV/ }1]|z,?kH܌ g.fyDe6 gݳoIAYlʆV_JO%Q4 ztbך f$]ܦO~ƶ2وDFL%*Dƽ@21S ԛ%&JkE0f6ULfpV@ؤS&DYl FoNzZI^G8TZk_td2_x7\\W> ro{TC{pKܻLa<=fwcdUp޴t{4۫7f~Y5!ͪ=;/@:L~ r[K8(Fg62ܘ"j-PukvR'4F2,:ðI#,bfcA`:dQtuwNu+tdk$138Y(#f  #dP (M)2CTǀFq# %elSXCaLXkApYM ƩAҺw;]~pd|kH` cHE*8ZJY0M2bf2MCE:1$3.v(cc4M"dU!|{׸s+Zxig{4]QDxgeQX60>{ў0>_/ؒQTL{K{b-V۟QEk}(@W%L|xECUSM-khc+8pZSW.'p|1u#j#Cz=u- =QH[M}dMWAS(3}\=~8{KdCP*c:暖we,#T:z ktd`T#Usyr1g)( ĶWCC͘*88:GI1ag,Ne;T!Cґ\j \jW!jȎ m82<=޸xSǍ| ?,_q.=WGҰ=X>>a+L/d %Kkr~]\dJ9_P ""R<ӯ.`Þy=hi%a  MTK$Iz|J SH )]Z _Su>GݗwcaP_ot oYt1Þ_" =R,308 BdRQa*A YFtOzspu?eR ĵ*SS>Q?Nz@B(",0!)SzSp,! >.O=[+ʦgdcQ=Nbv1 e~:~_]y;ʩ˼cYkݮXV MOD;<W'h V7^U7M뇩s{7Oǽ5#Ϲb26]j_owYU6>u ւ":|iϓz¡pFC !V$&aާa!ޭ}HK{tjx;,KEWa_GE04!kFMJ.KNd X\"B0 _=ܽ9i̖8Bcs oՏ6) @&&~)c'uN1[#J%664$X"XJ[k'*iC,kfXq(Id&RVsef1_Kjj nI u4ak֤%hȆ]Dmhݻ6Dx/: ]*d ?<, h8z:_v|_2\>"&s_oҮY2أvh.UH"w׋Fi(n8])ħD۹tR .(y^ӊEKnY\CYGW'IIb"ֽ ąٮTL(ڈI䝅cnNAlX.d{?F++^8]"a1m#dS9_׽?fBsYஃ4< \uJ-1 ;uo)*h;9 aCư %pgk )9>.~}hF/NDWo/V>@'9Fyy =⡪=j $[|z,zf-T>U>K?o%aOn;0c t?o-oq%1f\HB` ŒAmwX I1ɸ/n|1mqa "Fx] J⎎绷:{BgjkB|D+Ǜ x?OD]g  aɉs4g'IQ*uBDhZ\V(q7B7aڍZJBy.I9%8=Z$K_$~YnbR|eCs`M5clh-<'k)Xv\ y:eŌsIA0b#9͍lNXާR,͵޵q,Bi~1'E '9O }(E]jRz8%>9؜JXv#;rkA`u[טsQv P0pa(Ns|gn ĚIv&Nader9)HcQp5ry3_fq6~rK2~D%^Y{{'+][=g}֮8y ?!D)TXtAq:j!+9b[*BX*Yvᆬr*f`D(CjtX_96gŴ jpr^R /aE^~<|sg΀_v&HwMNP=z+s { ՌIEGpk4v#%Kig_75ȬADd#/dS>ގ`/ pn2%?S PU0Pawʪhu`+i|ƐtPํ22TP G4& `NqI l< .pA *nusZH%ٹQ> +d8^}?̓n~Stsmsp4r0'XK q5th3^x(55CnhKQ[X廄nJQ*S ?|=^Av">[5(yű; )]8a43k9r5hNź! N*?W7נ Ԅ kuc-:/ѷ'7dǔVǩC%0vMfHr}\C?y]peETX#ΰݳaAݭ7s}i(DhR0aeaEEHX%;hC*09m;}geM_/%<_bBg@t9Y9q٣⹺*vӻ-{gl†4 *e;轈`~ 7_&v.tRMR!ƎC7r$yI5+u=.֍롖nهS G7Hm4HDr-%d:NFdn<JZ Fzۛ%^g mEw<!fvH>zQfńLF =)=9‚S0fp=,ߋUbE;Y׋|.G(1=G+Ggshr p?򹠙rFμ/=]AV>e~ k)TqB顦dqc>11A=lW@{grfh'`g>(LuV&)kS*ok$KV>9cbgk'Z*NJNwD,~L?9O?rO U v I.ʲ@̧0Z\w=OhnGM4Re?`d9@RKDu yG=H{`+.tvwE|45$q&G,OKoT (Hw8{@{yεki5Q(%3D5*9nW6_兽Y0wqTDٚUZmRtA׆2Qw2Y(CL -([YO](#BrR<$V\Lʐzа6!OWNckV|8GX{GӉÅSJn^ cE{n/р>0c>R|2_+uؘ̽kdPȒ {ĒOJibm>KJU&jB1c= Yp?/VƶC!W0eWF ͌ei.fUZrR{aĖzð`H)"V ͕kG9Bׁ˱sxp!R` DKQش-1VRE8CDc8q wit˃ ,91`$H(l=:A1B1a a4aKRZjsVy,Lu%+a<`$ `k6JF9Y݂mΥA#8XsnAV΃,b4%D"Ƃ-H6BNhCn:2Zs"[ER8uܵA8m_JN-%3x?NYoBͫ uu/G~]0jotIUR?>. 
X]xw>0IDp|Fn<ӻﯮc˿#.seΦ<~6]ږ]_ÊY_㎇*:1n 5er$/W}{JDS}sRUKXH*d{N)5in-W8Dg|) yZX.p@@EF^r]dAvO:1MtN:<%4+rD5RZRVHڠ`7T`JaiTl֥Ʈ*HچH\x9J[Pe*kK*[JՈp0ѸڢaTY_p9L$JNR00~/HG'Z$|`D20(5Ypqڢ)#)G`Q2DtaO bb18LJ)"< (id <`zps0cJ] %wx9P+y Qy,eN pIaw+oN{I^Ӎq3^= kak{G HF4L{L]hjVw-ͭJa5wQE>AģUA׈R?8<' nWkcX\ۘL''5M^@a4`I7;1illwfB77lj=9!%vCwp_ TmKL& b12#[G9_*sosjlcid!Hcpk<#,42!#]~+ 9 9ґF(i VwQ[]̆.`@W"jھ5^_pi':8Ne58DgEF[':58Т tǵuDSS-?.ԂuhoiŃ1ayüq'Cƚut^Lk"p?0љ͝;7F":GO# :@bw 5ݽ>G6w é)W1!)C wA;h!RL #jG.l7mzzgsQ xH!blCўD3m wtH TSި6>FcHJ4VQFƜ *R$e4xhpsI l܄jV4/ Hh=˿t AW")ثwV*K&T*(ȯU@Ԁk$5"`j 3!uR㎇*B@x*7kR_&+y]G9} m1DbLB=17<BhQ2%' wfd*XոTj Ӹ Ke)WiGXKICIp0,Gj<R\rDlTӨ Yo0Ɓ㻧~~1>3IM=~23k_M\Y8.D&(;gFU5N2՚! xE +8"DJy1DZ|GqeȸQ>gdI/%F)1خ3ϷMz4QZ*W&.N@qed<-edJT2,Ǭ`@{K'ۇhv&j>.o*, p0/Z քj5Rgׂ⎪\ozĞ 6pCmLk RVR=xI|L?^-$^bIgM.iW B qs_- r&ӥς[<I~#đLd:B Ҡ8x 0u96ޖ9(%S H4UݕK $!mB!c0-u.6is r*:9P1x|ra}uuAcaM9 V}|`1UH`"K=\f'̮F?ŵrS&̮h%a.GyJ~w}xڌ!Đ8T'nc͹M s%ck1s̺ߍFPW [D&ǀD3kymSn:F!-g*ֱϑ}r->Ϻh ~6x_cO\ysr˫'c=ݙn0cӋ_~k/^o'_>&1ړ__wݑȢ7xM7"&zzvom{';xkud[ s&3c~! .ds{)r]/sxyť'C&Vc8C{&~1S( vuӺ ۇ#?CQzY)%{=Ac( 1ÑkCs{oR ?=B?a]rD%mLo orh?gJ句OUZ.nHj}=n?rxEz:W_ ߶zôϵ[UhRЍAIy=MդBes_A0VhY ,qP}p*E)th1c>kY*\#Ev3к0ta&*Ǐ,l]ϫ Dsߤa~\!Iϰͷ\L/F+FA!A(^|=<,bmn,}p2]d॓ws$tnN:S}$ m",·qHgnw}q硄 G9QZ4DL0X'slmqHCtﲛB i,z؀Vgyqu[c]kP=f\ߚۙfw. QԽRVK򴓕R%9|+2g& KtI~w\+{ Pj4,c FY)gKtensLVEVAmTlII6OR3~B}j35; >7ǁg-dzn2 l?qr/KͩX:uY6h-DC'5ڞ:ت؆DeWޝlB.xBs.w‰],'VCzS"PRO_ͨg'A^9'L0 $p ')(X3'<߈HE9o/Er2dKڋd9Nʼn/48&27.V4DsYn>-}sf7^.lA,KsgMVK9\sJj}?1c.mŹ!0{nt]gjc~ vEƼ"a|tiR"f)ٴ^Wk6î UþWtս|?YO+R%~T 39OgҺ +ͼt3+C%`2[Wdخ5N|#cn~$8GXT"ӺLyjXɂ lMl` B`e"n9 + e@ 82mP yMR@r#Bo໽A&JFGHtrSܠҼ#N(:BwxUGuA3 DJxJ(V;5T;Zw| &(}CQҍ.V6p <%'ym ں|Gџ`ؒ!-&g%F_x)v_ٿ'q>Ǚģ>;>m-wiI玆6`OWW~4Ilw7Ӕ֟]]xfzåݼ"XëDI]UgdΠ1Eq ~ԒPk`W!.$V(WXxE#QL+JP} \1(2eO(0uZ:.zHxJ2"`w'˩^#FX,:-F} 6?y]vB4y G֫Wi]p iƠ9n!֬\zF%}#LXvql; }6MQ")q/fQ3JfJ0#MD.#usZmF7$ 7`pτ/b6/D7&#hleKfkk +"JPI_玼.P:T kM)ۯD6{Ztj';ZwÛ::JaŽzW 4{| w!ߌhU򽎞KG_aN=c/c]H咢3r MJ5}Pnb\0:MZ-c>3&:dBƙGTTV]Xr;uswsP'* Ҩ4\+3#oR֊ɦmt;VpFjT6VXLD4hMhZmކ](L?3K7>&n~T1 8j]ӌ@(jW iVopMYȌWS뇩E$eVj}u~' xnڨ7=6Q g 'ua.ml˴8"CaAFS̗ZPt]"XA4{:l{{68oؘ\V[qC ndc&qָc2MKAo=\LBFuF w$le}9,n# 1:=׏]Z)["p Bc.v ! Q(]G$R}"jB.3 4IhߧA)༒@>fkN t |WsWHU#K3r]+\W*B$mdȱ}16sB1a@ N܍Gٸ'4W ='V#Eon0%kC~!K!ބq P|ɐ@p S \ ]$=QUILG>hr5t4}n BRD C#  AI_&t1 <B 4sa6D̹D?еMtIZ+.0 ћnwɂ3bbk~ĕ >?'g>I+=3nx;|}@NPA/i}:==\.q^#J0 [j_um?d* ߜo>Y"l32Y8KVJI8b7n??nó)'{rvVTm? C>"=_w'L8О36mVKhߐa)ký{)\~؊#gBV3؊B!\ϾW"ٞ+(Y|op@T:f qL}/F=ZM*m ; c"OHd  K)Vؕ<$ PPAq !iW*HĒ8?r)L+tf^@+OVBUe\Ykꞷ7Ә$W,wȖAژnt؉B/R rᮕvcO'0L(aR<9zU'Vc&#$`PUa&qMW kÕU=j`L WkC2Ș0&^98qOq2 "^5$պk+tZ4 OlZXkͦ/ͦuyȑĔ!@r6!"¤@ 2mNp%tDPo'L!!(pr0Q$U9 \- JB"DRt {su-HEf.k-Lju|bK]4Dhb̙9Oc)[3Ja8v3oߥH{BK-%Ϙ։a%R$KJ$y ph TZL|4S]a:Fvw QޅR |b#2Jm03Z1~d`6~_jGfY^`3 KԅiÍhLG{P+),Xp6ņ"u/OO_X6xw\kDi\1K+y5:Y.XM`-;T'0J s#T$M_DN.rܪzg dj/e*4pCGcŧyw@EЂ6vw$iֱ\]-jօ}\`փ{c"๪rr7riuqߛIVf&>Rugpp<-}:d AX¡iݽ9) '`]&YHLBu'hϠoëA|YCns|j&,(5xvhKp>O^$oz4]{+'Q8;)WN]V#1]{jϑamJG.9lL?0]s>:Od82Atŵ Nu6pqo~4_s } ,Ta/G39M{9@h|kdؤi_mnDX' rHPh_g~x},˟GQa9_oݴ^ӡFƹ;'o_>{aSNG+Ygqwԙ=z`2ը0m~<7=4N%7܅nף?^ׯ~y~s]e[ o/oS]v`4!Co^o)sqn u_AʥNԚN>~JL'ym@gšolfW`Nm{}( Գ._0c_> f6sڲo֏߿/v2'¯.v f<__Ƴ_85LI3%s >d!WFA<;(lic0bA{v\ Rs-f}u$Lw(Dho!&9hdr@'Yp' h > һ?-VPCiy,%֘8M?O؇&<(:quIF?b|l.~4q&I,d? 
Ϧ3jE!T0VT:aVS--e1+g.B,T[Y#|O>m6ѷfn i\7ݮlCiV6b /At,aTِhҍmJ\as/H2$v4^Ha%tA!$+̏xϘ>$0IDa$MOEVUapϝ@1##IM'!+N"q@:)S2ZpӫX;"v֑^y .Z*j%1wz^T}ⷈrK1  @]a6Vxmi_!A͐e1~u=Ջ=l!^dC!lJ\NmYc'2TtL{3xaT^46b xxӱI<9h䵽j̭GzYb\tp>DElBLA~wNP΃CQ1p(:Ĕ>ez>~.=nN1P}9lZT$wKKQF2%)A5FPVZ'`,Lq*6HPd` |i-O}y3g-1]9 NI,NNKJ(J/"kJ$2 J~ bV8+006t'+G 4DŽD(Lc+u8#8VQ+1kDATRr'B2CAm( G\K0~nnKeVR (_nU64e&s+8k+OM,boŒx 4 ':6%U>@ I;3=ș"Q8xg0ɸ'xPl7 uT[c[qh5!&.%h8ݲzj$=zyLʚe=8+ b1[LL>݈-M'"3Γ#$1rS`U쭹L}1|z_(:w`n؟*[ yzi)+mB6N}FSciKpKQj]vZbO6`@VKuq}2AC[Hg`;ERd:e{ "/ڵiTq6$.&G AER/AFN5CPxۈ4Ŵf->rfhxڭVڰΗFt-y (C6@S;h +FkÅ|p;:4 ^gާO>R]sDYkt?8 ǤCjsN |gdTt G%h+rܔİ8d‡M[ =LUxwMc.[V4ӎQ;BZ}zPZ;nݼVXIF-O'Z"P)l=N('*$L),431cP8QVXas˽hr'c􊴓9AicT2B%X&F$ZQtcx},) Qc!ǵ6t1NTM!}Bs괠{}l\YmtWWW3Sm[ؾJ0 _ˮP%h ѥG,dђt΀ˠ|- ;P%0kA58VהAlaMkvz=~񉍔"/='bòr"UkIwˏws@x-{ϧ̉jDJ^TY '?iZJhy^_৴My5sk?={P."[RWݓOH(<4] gj),b!#5u Z:o}y 6"hyB@]w迤aOM+/+3riH︘G6Vi?OaBO:Q?O7ˏ؈M ]%/}E8g4RG2X£_*q K-ӈѺ=GQozBnWȶE-k7iz<|Nˈn~L:xbPRԋJTї7e+A]=Cn#V}$O2-EOs\L!ӇƷCJUeTLzp%h݁\Q{\PpsK hA,qP%1"Zp\Drk:z[-DwžXƅ4ދ*iqY J\HQpu@/܁Ŷ?֎jaیl-l䎚L,l?A]}$]b=4F&esSSVѐ QSsBLȽ,uld^NtNt P{S;T*k٧[&jn3 ;+?M T'&jػ9T gy+v҇'TZ)ZVgsn 짇9g Bv y2Dtyu?'Ju ٘/{&Ґ{y=_u~ކRr<r6Z4LJS.$wWE{""9^e.HIrs-8eʥ.y8ɥA[N8SyoVi0BJ܉e 5IV@p %)hpL9{ZDM%cBH8 #0g jf}_3MgB6e׾NA{($%(R:W|Ѐ50)1hbF)Ki5ruH_f4'؝jSEzWi~:v24Lȝ:e:B54;A"N`uj휀vҜ2v]ǎ!3`S-@̥Jns Np"0mfa m4h0B\3VWy\2vsid67$Ƶ޿+NriPLq\XQ2n_p\$Tib{GJ lB[]X3VRK\|.vޖҜ+2{}87!hpڨBTU^~S).:ch9ՉUsBQg`[HJqnsݕ/"8lY֧7|$frFfW]գҬ7K4TK9ZSYn#ZS f~q_~s~ţ2--)gcQ!xFF "WV'^U#Wbq2(?uj幣VR̴!cD)=X}y "ryǘ!76ދ5 UW<s{d]\Wgj5і-Ě7F#먝[l% X.pb  N;P=u 9W="v1DFϵ]2[]EUԘ}'#j_#USđjC*j +R E -e( _ve֬+*q+&'u~Z M\٥&xKɏGUwz@ H؇~>D5@/w3slo|PζZT*ꌆʭA d6, i$2&27(; w7_]7 c׺$Pf4D,|oo)`:MHMB?NEO``4'W'p5s؊wK&{=^q%:R3v+pp]MmDJs;z'݋mݽi& )>ta Ll!|Zt$rjX:[l}f]=+uJ9owT+qI0\)Yf./9Wi:Iy -JT!3VR%Pgu$¸ p+*aF<&3nlԠRNV j}vV}Z>GP9ZCϡ3ӗ(Gpc!L:Pce*⩀*h-+[a'hT="q2"CP^[bcrsnt9kuZ4NJ|H)#d,~ьK!{V"-K:Jɠz #s9ق-pSjG`+v.JH-Ž3Ůb>O!n~{2VK1~Ϧ3wM?4;ȜgTyon"A̩Bxé#EHט.,ug|VD4\2Bl_>#/:#f Fgz=B@ uPrN57[wI3XB(n gJY8*cRʓOoWWWѠqxKNKBR<:P. q$Z\h(RCc bѴ[ÁVLXh$A(A/9wU$=<ı`otB%50<|?[ؗLwLj0y9JE;!yF[B9 2.^1r\Vgu쭶zِLm6Bt9ǰcGl!xۡ;G?m;~pҰk='ձ  h;l_Ҍh#^u역ˬ./z10&mm9]7Xs&P2Ý?e.\Д<=#e5G8Y|÷'%>)18ϐ1ͅ 3G~0>g9|vCr#duCRx6YQC~"r4ל]A4M15pg%&'ΕfizݛCZtMAϦ{ELbz +8TwΠ煽A1NzŤd6;n_Dp Gc}ˀ{]ﻘLmKB1.mu~|*TQy;džPadJ)_&h+|#h?'^Me+2>+$/h^~-\bqVI?S]'Dv(ڮmOO@;5xI]@Dct_Zޕ4qNHd>#W^M>>9jPxLЩih1 ۼlC\D _ަGem8}6BAQsi0wCa-)ܚ<ڒ*=8>$Z>+4YO j?'l& Ӛ8 [Gi%n"h8x`L([FKI%G"BIA 0 Aݔ 5R hIN>&Bv['92|6@ +wf,UưE5xߜv>%LԲuTWsh3tR0_J[fhHp%eثh4&)# (P^ LE|b7As!NRDonąɩ!&Z[z7ľŽ&Վh)\9%U qXup8)sݠcn 7bd6[^BCVQrp#Z0,%,QeBc8XLyW`y"Fmpөkҳ;J͝}_BzW@dcQݥ8IЈRipJŕ gr1nK痲>pk}W}=YoFE~,~l'X 3ٗ fӣYc_5%KDIHh`5Uuuux& Spv@r10" 6.|;eG@6Ncp4QC hc(UBljuK  &CP.Ur DJn;{&,FZ06 gK8{J N {D$K>a؏>v:,d4#*na$ť8>GP(p;O;ѭx:6aX*~~ p"(BߓیS%8Y]7m0Ep4_&=nۭ;O4 ux癠|([l<>(> PDIbV2紴m5ZdJGCŸCZ1J-&T#F~8$9H#)4G_'I5ࣨp ?׀b)+XW#*N`LV#`|5KYmI}",[Q- ^i,$U^)"kdczS}؎u P:{),y7S~o݆1@3,7ﯟp!͵k%sk`h n lBbū chm}TUq\q?8^^| : )1v=V8-9Ɣ07;)"* P|"mhL(U/uX,\cS;Jr cK_v\O%$mf䠻r&%].38*3T5t. 
CS!93ګğB3&{Kˉf+uS@T v'C cBCF!l3Α2) 5_ #|Y2f8?C8y(]X [1)fwZ~?f{_[ iY챟tZa(9yTZni,d8IL7уߐ/CuG/v㺣7gofɯqVxHqB˧[7cFx"jn}6MIx̙)p2_ɹ]ȹw879qLIY"1&/`'Ja(%}NBRa#qz˛ߎ[i6MnpAq" nplLgq t{3%Mf`n;>| R1M wƽ_# 2'&?ܭǐrCc6wk>uQj(p 61LCKF  EacYP{S?zR_?[(ˤۏelU}u![ 1Γ)q˧oIϼswՍ+|o>IҟlQ.!a&LWw+,^Jk^rL$XbOW`oY͎kPӸ1{E($pn5 x۬V"eEPG04UJWfhc7+PJ SILEϩ^ uBe'uGDqpi2>GFuWH*;ԃ)X,1F!A^P':: $ɺM tz)_a8 hru*V!k Ą q:FQ@Y/ELc O8ɭHHċ؈ack9NP^ KW"ŽbF×s5pIP!!62s'PGIc§ Qd(g 9@`h̀}`5GX]d $(B 'e>0W փlsv<)XQ'3E@9B/J *|NQpNAR۩"PHI06<)0֥cDa_1,cDu(x94QqtI%Z*"Nۆ Wo۱,ABZF>-I+4TI{pyj W  7Ǔҏ?xӛ|Zª$LaW3Q [7)}C~t%ȮKÅ4HşfX5Cd.2RHCڐ|%x-lyYjhE=;rf@ˌ% i Q jtFu vT 1X}5#u0E\_O&]d?G@b5G[bvZwBt&*-Bg3'ijfͲq>ON@y&{t٢+{l*c[c0 crv']_x0aV&( b41ʝ@Qfޗ|\eۋL<ڻ.J7x wLϞ8lgÝ{"iB?%RDq %:+wʰRǺTUDU¹HTь}u"vc]˺6^Q 0¹n&E !ਪsuNHT 6D]Dh4-T :\߅a=uS'O&_ƻ*8>>3Kmqpq`{%as~<&OÇxao{|7x 3i˾]6]DcjHM\\{qn= G!ti8k:{J4zQ)&NϪ]FjĜ)olXmAV/˘Գ]yL]rE-f17 vEIR)-%Z MaW5jHdFZ |){vLg0V0Ncz0 G2{ <ܲG@eGޑH@bIf|(`de1mbUԤ&N04arv z[Ikt7dKʐ%!u6naǘJ-!,;k@r̉!$gH ^iBUkծݚ3Ia$}7Lk.7](yF3 /C翷dg=;Ǡk3[_ /ܭ\>;?b6}s(q9d:$?-n|˯&kX``OXl@s$$VTY`D[,ب9HʈM޿|>/~[洤Ombف>/\0`e"1_ٛ߹m0Z4|s4dxA:{ϣek2E|lp;-̀,O(g;x̒Ŝ[335ɠ!,)ވ WV{jKBGn&N+np]]p}"^PPey$*)h:'?Ju~<ӃG-m(3I1 $aF:Mu$T'ĦRܗcRx2!oy1J`D} vҟ\6X8 %$X@/.DꊟA}5Al㬘dًn //$wjC^P)7sI*<6dTZ5LjѶ|ow`#֞oQPhI J"c)&be*nHp LR8d Ѓ 9AuC*?ALVǖ Jџ~xrAlElckAE'/OGbTu1[Bmqb"RT-)rTwIځ<Xv`>ySbSl 2b`؈86h8p #qiqA0[Fp@wW0XKjE5' ߧ8 =9c\i&G[Aw}+;sC|ШCbf#.wҋs:F epkIx(w O*hm;BwA2*c޻VXq KnYGpNV袝yoX8C!? t>.)nW`~3TնnT*uR_`CmR9ֈݕ}y3%)_?A olgTT"͉:׶7HjM.+o$ PxΥ|X%C^zMdLU~ErVR(aW.@fO=<"#=<"#i&XŋfTKYy w6|jiVoiVQUe@@".b ׀vf'F41#f`ҥQb0$( +dcgvLHD )WXJpKH(1̏%X$|7| !F+P#H^IЊ' w3P-Yj5BsS=I <f 4&b)O$ JҀ5m?T4ګIa2aR9v:rdpH??LD Vpa\FAj4_Rkw~D;Ac ƒ`"A[%%BkJJAR [ U\* C$I._94a 2BҸY7:bX(IْC[ :T7JCI+5 c 4Fa Z&ۑdK@vHKw)4:'^ȞE, Yi*h"e)mXH!geSXZ Rrpy*ʷ?^C@JAP'S}MuSyBP)Dx$j}}0.h0pw>ÂF;n|:n^컣>a͏MC|F۩!KNΑRsizcj[RG|۬8zJJAhCҠ#We lljEqo|>ǽ7|lccwTDEr"X9Lxb1U lJ9<"x0ظ@eJcwf m-I23?[ǿ ֢WPݾjFP #}.F ? 8gVC.[ :hN+A֤gumezqmL1o|b捫cd@Rr'V+ݺe9qo<ԛWq`ܧx~?$8"{4 Tр3]+Jdno芀3]' 5k>F=nn[%5[㩑.C8U"GXJœ83#*_fA v`rىEgaR1"57 FpJ!Jhlj9S|` Z ~Ɖ".%$~dG7<5Ug!O5ʚVv )?&Y>OU#fzB`oYdK,.ufMݜ&Wce)N2Q`,f|OU*I]}X#?||z%[%9d5zy`xH#Î/!=W>{`:8a8o/){_~gZ~gLJͯ^;]3/$DpeqR~K+99ek`n^dR$pE[dx9 "^d~,=BWc.)[ u^W` [8Fec<}7$VC3rxޭ^J"p3Mh).Ǭ)*,uFa͔ύWWhڀN p."D^o)ʸ!0Zi9S|ȢK}E_A# }nFn, Ѓ~gvl681TeҥFR=g;1OٻFn$vh"S6Yvq {߲lr$Wh=ݭ~5$HŮUXE Ǟ =iHnbx‘AՔf?룜g2-~ByYL6C=GВZmmDwVԖ Zʹ׭M^|7 iy;!˂{G$7Ŧٟ"v"X ucZrJ-RvAW^9$@mԿ ŧ-XC.fNkqf3BZCF<ڈs1{Ssy" c+f2b8&o#v59\29YWQ'S)+Mh7 fjv:p܃ZyS uՓ7gT6N+oYFe!Nvh3C3v8oB1J&t>Q %dOU|ja6- ޗD[# (D֨T| O-VՇZwB?v-гyjAwaB<\oFsX:ZB9 tR0yj4d0.]^&.Joy60dF%J4"*+|Z?J6 clv_nY">YHϡږV[G@J䚩FC?(oCw0[!*e_6?yG8꜒fvu{V{?/gSš V#/ξUekf/W\"^MA/k }DߛV';갤trRMeZfå "h$FI={)`%@%hR£Gtќ1e-瓶C+P ԽKѨ;( xȣ!9ӁsV_7҃9v䈚(6H):$nun`zR숡{kqL1RDF$۠e5c9F`PcWuFݝ~mk/36zo-jqe?[Qo^1d*bʮ;ԣ㋕DQZV,{0tTxf1/,Z.{d: n 5$/`ֿ1Wr2Pp^dLbOY[$S8o?)a#c s p}ɰ,ӹ3ˆJǑ~"Ok\F}tk-Vb5R%X5r#9'iZ_[$u樬zWz6w)Ikwjnj4Lk)}r̘*_;ZrLjG&ׄxCR5]vP@-p+Z47ho>meC2ˈNB;ˣw59}~ך662n=Vn[/> Mz(6U@d^% E](}sC,5zα}~0F!] 0tpna`rn_zۛM'}"hHguz&Ҡ"w(1/X Z`'qh]fsTJ޴NZf)þZ#hߟ-P#u FG㘂, (EirP8*knɘ,rW_d iLP*h)H&,E083lC\Jf-LޭEܺtWdiF+ܵ/M߯dy:7 Vyۻus3 Bng-H> KT*F]n_;ƈ09XYp?ǛVl ]*VC[ r}MJ eȮ${1'' FFrT ,-s!ؔK!iц#9ZaqLa* MbLUBZ3V컎ږCܛj\(<~? 
\',AHΐ*I5ݘэr fW@_ /`Wt9Ju3B!qM]!6{Ðň}-,Gd-7ˏpúGf-7Dяp-ztþM1}Uy!*|A]Vdex{+xVM-yЅdIi9GU1輐IW:+ Ea;d/,AaD_7@ڞ'0GԀE(%^ݫ&t.:n%1&齛=\Ya}:&7їMk%]>>{`EG }⸶EN(4XܫVJ>S-%'.j..h[FdVoiPRе5tֆ.ཻԂyW"=2#Jً{e7WUVT[1fE66_i΍iclkAU zTkMKOdS-E^tRb\LFWeOs34v^sq}aAFp޾Pdo8,1W4ta`(^ [}?Ѥ5*a\A!E X(@06&[6ppQ04$i{HKme߆3p C4Z+[:np00$ ؇8 а}!:@ }b"Y{w`Pux)hk%*ؐ,B윲tcX5@bXA#?<HҴԎDڜ1ҊRZΌ'i& %ZcZDŽ5VPنɳ=m ΑS *zMkJ[V yBQFAkX$7 Z%'R=bhG.)='%m*v퐫vod ZzSݵ`f { I ]7)TL+5~UxQ7w\iZ{[{nalJ$\CQ^!A}H 05ɅĈ%4+Z,چF`ۮΘgu*$*E~*$|N;nPZd;‚Z<Ă%Axah*0AV|\?j|@'0S#X%x7MZ,iϰԋZG Y xeЁhO&&nR2d6[ڻlE M^ >MQA)MdZJBHp¸T%d`Tl62k s2rɮm;Ie7R* m{fC$Ev"> -=ddn`V6<>)BCRSzjm%bNv6ނLMfe6irlZ-Q.mVnӪ &2Cش 3F&qQ( Λ3&BȨZA]St'Q ' Q$m@`G[AɭaT~%Fa9- 'ٻ6$Wxφ6B!ivN # %y}p6q% 2+3+++G Qq,%Y'Yd4e.rFk 'E$$B%wx@uf-QJaKe_` ZGQ+-BR2ᅵ#VZJU sTi7ϗ;QDMq|# 06mjh &KW1$ 8x%Z,K f 'd%aqFFB":8kE$bCyÓ/+L %u#O3, GM nd `D\(^q/T%CB.OOURTjCN;S[X$ki۝`F{ݨR< c!`E*s;&5z_~cW+)w$}ۧb\GEUәeฒWzvzfϹ Hhf1QBA+,|nrv^%%PJSNDqɶIm6.Yߤ i ΍S=g>` xk))8+ʹv] FXk M:O+GT ma ֑A"@d((`!)"(CL$TVFGupI e b_tiT7% ]6c),*i0 R[uDʓ`©RaxR# xe< eW1w?$BpɆ͈*v@'}+ qqlpL1hi= ^n`[) Q -"?_Ka@ `qLڔDA,f0w]ĚcY=芕77QxHkq/erRq&RI@1'bp ӎOԿ5we$?[Y%2#*ٗ|}% )w/c^v)7FbI:n𲧩Rʼn1oDGiK?m{ڮDZ#I O$ˏ RB x)(I=7OǕE>$]˲QsV9,Z,~aSH..a3_/Z:+PI1""}ЅDDB-8f:ZH~_wą8Df)y D]L@ ׿>w{qE27c`)[^*RSf-/qdG:(&ͥg>:tj[i!3dɘi@91 Fh (&& ڊ:66g*9r E :5 bHIW o&7GFqX*F`yTpH pQc:e:5rA~0Ԙr0E %wwj}C'48H髴 >w>7C_ }^"1[7zhӸW7"Ud`S;$}$4<(T%lm!UJeh1ՙe& l#f% q2/ǹUIQfF?.(Ajv7]W؋+ŋ 8 vHE3{&Si@.6yo m[]ƈ1z6@?)J+x\2| gK=+i&SYC3TTg@R='=%{t\ (¡XhsALH$ٹP9ҁfDbH[*.89HK" #g5eF^7JaBc#a.}ǰ^ ZRyglVz AYuFSo Aǖx6Ĉ .I v0CyC6.h$qU+[F/A\ qL-3FTp>p*SV jLإr R>7+S:pX8`\Np,'`uM5K8skzcg,]p}^r(;ycfC> h/|ev]̮u2]ܰ;H NV7I;R)xp#L˃ZMj@_k0RXEIuZa u5WLYLkƌAh߼5[sOqP16WG0%>| &04GMJ+LxamxEpVRX'ln%ҹhnvZ[XG RьJG(Qڈ QHDgyp$ʣ`HPFD+4' BOݲDtŖBo=%𢥻bKvņ{pG(DhA48"lWlLIJ!VNK/{<V07A9)VFVS@ <ɜo?@ ѫ%"ԲP҂:x ȓNd|Ӛ=';) .0-d7Wbe;XR!ɽ骻]0a7s0|hhO||.-P_h"Xfkϐ%e=q+n]lGn@W70L?ԇw?~7޽|GXbǫՏozy/|<*#Vvn޼7g4ÙDsu&Lpe뇱h\>q}BXj{!{?j FR*=.]z"e] U^ uwؾ\4Yr߶o}> ÷y־H9Qkh Y8$A_0O6/ /nzkzogrnZB%M/aw0$ \!`Ї0/6T"l|~6yO[⸣v}+ԽU|R\=nr[R32&#Rsv9T)whZa~마4roH@YSPVx{3zk`S|{ ?G n¦K .ku+%PiG'?o}k2w/wWb!bKNtJ09y91RӀdm:D2W d{FI]g6 K.khxJ%jIyJ,TXr]t5#TM8FY3t=OsvTuEaWsT î}9*uo(iW'-&nzc _V*Xk΢> Pb]ڌcw`]&JpIdp81ka;\Vk_jrZ]k?50<_#gZ4gZ8Bb~? {8l ~[=P¡d;@C9r,&-dOq8ދ9DF9 |0aԊϑWG/VO^ 9o^ cq_E?"x֩4bTibi!MРHg'\<!x1UZ;!^LysMuӾ^I^_Rb]&7c\ v7 ["JK|,AxZ<5.UvER@)ryq7=B*bH+󝩅kT~Md=@sDEUT{* PyzHZDzʌuB\ l3P&w-A49#MQ{H&7!lk&p5xC3m[٘V;qc˕[=bvf.]+}x׵nYXᖵ{$U{x{ŧ;gv2Rݛ(u#=I휑[hHRsca.YJYJ Vr'uVBKxT'/2-ZUn@[UY[}ު%e_>߻B%w˰^{> 5zRGɲj $6OR^ovWX|sn' \ۋy 4XKh8?nMȝ:~G#]Ic8u:{O:jl gԹX=&RwjkkVKKo$T\/QZrɞd.ZICNJ`ϥΑk,#O@\@〴MRC~> 1'7yOT.MF5H5eJߑ4G>~p-l iTs$vAː?cHe,˼. wRq2( 'Qkg4&Vɰ mPKTʧe<{ݲLJAf|7q9laJʔ k@4 9 9F\z?4cFߩºm)E9R>~2Z l _2YMt9X8~:ͦ"Zs`rFh  #o~**=p02PxW澌ECI3onlՈHΚ8 Lם*/FƷWOk |/O,m9<kd cE+:S1v@* iz-/ݡ_~ڽ@"}5_V,p ^P%/cȋ=%S .;F_gRfGvᴘ-YͶ+%c?c&ӘbL%)f]=qI

q 9(Cd !|˙-[dqW}U5#6j1DkՃXJlwWsiwYJ٨Ȣ8-RrgsÜVmw0ܴrؔ=CuS; Ϥq%=C(R˞O9;FuspU϶p#1 V.k/QQHEJU Fl^HXxRO(;W6%f\|$H[bH}fȶ0δˇQה%FN c<1s\ۺL`RsC{Bȓ':gSD\Q'b^U v?&ҾX֮ƋG+`)hry[z[/SQOZp5<Lxp[r@ԧd 2ff iIiF4 3r6 ~<6fn_Na6y-X\*ϝUHgQg `1ϑ);-oq>5Y un@c<`m6Ÿ0KUyJYmiպ77D#d gBpP+Cu+! SL Rjd!!QG0Č<;*p Y`8έ2O "[Ys$ [ dXc$5I[%KP(z?'5Ծĸa1$Yq =8E E0_/^=e8LI t 3~ͼAh)-Y|oÝ#ۊ\8"Tm" )m!qjW̭#L툺ڦxBhc_ڜtdz)$NwxM"ΙK'T\sO<*b dȌl008!c2xqQZLUۛ% ;&ޤ?J?99UgV$QfT M.v?P9U9i3嫐 sSq甫6/*Q'9-mo`(NoյJ;f3iYorǼ)}%:Q B;W)3ƌW4gS?~F; (|lףiAmW<7z^璈jVŴ:LgI>pb-T*es@|_&F䰿"@qTjN {h̍We!Ψ@u1%˓5$j܍rg3z>i C% ]#$lӊt+1*Eܛ~5\cXk|@;co_twqV"ܻg1o -+kn#GEjH8؞hG{ÁSMQ2I(QG :ia;lXH|H$ fYeGGQ{۬wvڪ'WEMzj4R1,ň+H"֩mK9-lu@ -n_GYhP eYW TIն-9UߡW0 @\O|r$6%#R4as}zv% A~ԧl}~#C|ڲBH 9RP G:.RcUic71-4YEB~̟;YhIagEvw iκZ<Ƣ#ye[Dͯ RyM=١7#`  K j ?m% z$@ NE6zGc5sLY"ysthttK@(#uGqUy̎\DAJvdc]:ډ9Lhxb 5\Rndj4 C'z>4R֢*_H @BGY aK0^+Ă$-Vީ,i$iiZPC`i&!XGsu5v}5MKék^N(uk(]!|SG'4T{ʮuNwrR.l$⒏/hqmaJ!->/RR "Z\\eE0j hiP%5uu$nKJ-w:ߓ Txfx<O`H ],&P]3q4Z5k%&PKG)1XY?ړm3I{40E$g1s m0Dtm^+\ ;ڋ@FVWCp*Yzq9ٿf9.45`!7v]a޿6F 9@yϊ?zwş! c*Rn*+yƥZ;6ʬ3r7,1bJ5<*M\xmҏِ;هZ]]{H l!1%먤\)WLE'iJq$53ܷ}um F\%,S(":

Y,_"?Z-|IB p"u{rfVc ,cZh =]uM?MPg΀ kK$(, ZgV\X,,X QE0eq NlU0¥^بp..?{iz∃ Y͸Ey9Q2U*B FC2MLlj0Mf>כb>Oy̲4~|[A8E'ސ"eK9'%ݯBou1nmM&sEh,j4sŋ"]mv.۟}.SK=*JͮWCFZD{=W) 33+ lILyGB^9֘m**0Aٽ,Os im],&糋D "*[R3JQ%=P_-h:8%%Tf e#N!8i/D  TR(4ur| c@0RS7L/:dod ݭv jgƙȻj{뽖O .2M}_7\s|E+STNA1t ĽL]*io9X!g)Ҧ󢶽2Je|@% ,4,k=cƓomIWOY 7ۺ'ؾ'(%c!HrJ%&HOQ)Fnj).'[#۷K Xk}adѲ%H>ʾ B#A=F`Jœp4sO?kAmu/ðD!wX^^ƆSpKM۳vqD lԝA#BjkmWmdar&A&/&!+y~o3Lhy;ھOIBXż}/W|jf. &,[ASmHYmTX0V4l|P;5nRݾ{zy#T[1N/5őHOP'Ez: Bkn,=+x"Q'*ksC) pXkDcLqq79UkJ.ZRB[7-ܺE[~u-mvtdR\+Z32^իHw3J\0~nGH k^m/5^*V{Oȱӟ#͟L#qØh[[MY (YRZu~0.~5L,w1ju/&pMrMKxán.Nv/.aC Xw]#O` 8ty۳3ۤnljs) 欦$&{P.Udx@\1]ԟG!+pW5ɟ * yԚ y, l;GQIƖZQ}=iW]U͊ڪ*ͼ ^Et*X&7`Pl[ L8kZN`"ŽRFS\9c%b48MԚ'Qg0"`d,S("5'#hHj%P@^\5k|YF)"˘N萤bdM дwőŅ˫q"$&-Vr@|2]9Uh6ߔ`bLCnB4 yYشI1ͦj4J8j&ꌼAbj,P$ӱqaAʅ%R$7:` ݚT9뻓6a|~FpJF勒GO /TO.Owv]ZsEDX*ט$8;W{,oI.>`VIф}Kzrvz7/BG,Do}&Z^DtNF~rKa=lױjqy79cr4{` fr5\yW -q6S@Z &BQ_,s. q4)7kSʅ s)gvo#\s.*L[7o䚻)O w!ڍؚs8H uO!!ؒO;x\\w:\g8߳}v{~RƠI!];OocIu/Oza Ͻ~#ki53Ŭqk!Mo=N5Vg,DzOmCoxv+c{^q2%]"_n)֏d5oGl߼ 3ܿu֧pY6oE/4_RM V:0طhkFfg% }? |1h1 əZ5)TrBp! o|~aÄYD}4Bo/4G?k)Gusrsͣutѿ\?:)/x3VX#}7!w:KYiB;<],f95^> N8]knENF1 dؓc1 Q!ejlN!;E%WD!Q41VW|Ck1kt*>/Naf9w {p z/o;39S2z47z]pi;)E,5*%XZ^$  i;a+6mydjy'ZG:)*򿩝ӣON?ףGWkt׳?gW]GWw_}<Ӻ}՗ }?qr^Wof-F(g]|7{G'gWWſͰŷ/Byg= 3,yO#o;-)ŦּLɳBڷXrp hڬa!2H <%w~igϦ7ᓮZq MYVLWpr/MܳFQLҝQ%P5[Vd$qck\'P 4Jťmf-|J&4stҭQבl*:ϊx LiDP DWf"Ev((@]Ҥ~i5}Vkro@{Y2^$Im41`HHvzLIzNfUOKI$Irgԯ$}+9x <]cF>=Eat! ;Da#{T9 * S:C/(uN ˌg5`7{;gDm"!cqˌ;XzH4j=hU|򎓪:I}Vc>o`3G-hʎmV=w B 4iG-8[=Ms;ZV;lbgOgDkߜpcA͚-"!*.OA/t紐 Us@_cFq%ZVj* 9#ŀMl:= r]#C9j\Wj!>0C,Njա&X"b\)zr}4ઋ9Z ZHZcԄVfBjW1o8=&-ꍴ$B2@BX 1thEf.9̹)T97 fى7o_Cfux$o4z/?4M nW-Oo-'PO0p%ק.%81sl+7X5qvO\* KR#ŐnڎF-U[E=b*b%DP3:) _=%#5&B#̣ޝ3 ZT?J&aEq` g0ަE&[h-X)*}ԉįd3TUCϊtל9+р9+q$Л^ŤO%. ^R qϥ+ ZR;7Jxqԛ~|&sǏ'ϱIѯ[Xć,ac6D|cr|SPK`@&BuힵSܰ?\\6Mz~[b]Q܈hVwGG<޺) [L|M}i[wԎpXڀ^Ͱ$MͳڄaJs}K5Q%!6$Q#"$jG\ ,Kg"qqEkmH /P~->qO{'b,Q I_FL.鮑Fr}j:@,%MJT+l'LV\oȌ>Â#xpue19Dc\QRE扥-yuzͬlRx~^vreXuZ8ɭH8" `-Y'(?[C9WAozH_.~VcP+ B{dqIERmSdyLL\Нa FR"htB 3سv< H=uSDW!X+,&6>mderG^1' Ĩ\}LGm5Ww0 ugO,Nf |.Ie_'sxgWW@p̽1勸,SHAxGIk749A'g7bד y[pRWE~SFPx=9Ϩ@7%sѠU IV'(۔ހ<_> !ղʑ-23,[t"Ҍ2[fj9Sv> ŰG9ocؼp(kIǰD 1 2YwъYOT&ƀ[7]t3Mo|ifb>@< ?gKH}BHN %!$e+ ?\~6@~/?5㽂'Æ^D-u괈6M8Ғ]6p&ub-S4^Z+82鱦ש3ѽ\ Y8 kvZv{k8[4 3~X`.̢w"uܲp?c֏kۑ3N&U#>~ަy}<,tx5B>a$t빸L;V8dR0DR-'⏿F8P\tt ,:#:zf0"n,6Syi4F>.UcR=K?yt#} ۫ܧqB*qQu4\\~I$*~ǰ|yz<13RVOɊf0reph،k9k3!SBʚyXӎ+8m~0q5bGDw,/j>wT ſ^WE4bCgnUNkuv]7'$y5*G"bK0Uْfv~5Rx *(c9VCpcL)3-G8hV"xh5W%}<n*qz%p)"$Afe3x~H^Ѩ/R8u t9B{F%q]6u|jI5#E4Z^>HX>VZSΧb^pMxꀣԲB lJ1_p )s|c}aLj5s )rŒg #xxfB\-a;M2F[8qTm ̣>VyUqIq9' 4hPJDEHg  k"xWD6$H>S{%P bꣵmjZ kk64cdG &rO'be.~5e 0J.{;+=wP}'bctI6cy<+][pFzT/y+!pWj04F')o_wG0M7r<1@6lyz,iz6Yat J{^b c7+of.?toCLJbqH R0ɗ**mYnB˥ I F*@0BO}JH<ax D%\: x*qхD `7YHֺ5YkbڛZt'XaF"8"Bo [ AfYBĄENCie`P<#!jk­!QoN5wQm"r,5YR [NSV iLl&V|cѓcSsUꂕ-W1#)nFZ*xI8&4pVPsg1mӷc"bWQ3sk}@hW Q/ !XݺED:%N9/Bz 3nh#!mb5C,aP0A1+tMq%TqH >3(.Xc`KXPg2{`iq%r[䈶$PsF< 6r.(½=ly\\ :ؖG=ښۯA2%QH=Q>/lt7t8s#8I iL`>ʵ bwPfXF%ᩱ9Q3׷l5 9^u91?aBsflWC;WPUJcH%AǶk|1/?N`$&rN ("N\޺2`Ҍh{Uv 6J0$ZL)<hnkRtO8f! 
uyb2\C"RR\X!Uw$\.m}ιT#7R'4Ą;#-t*!pV\Ak4(%G,ψKEH^<T[sa[RA;&QBо\Қ.K- nh+B\@f@v̢f`e`(eeBHT KϘT(=y/Ds_pN葹uTmn!]AdAMdiOxW'j@S# $0:+PS]yC=cmX#+b_%;T*DBF,)ۻhH#$uWqӊ+n4^q@`⦽ )G.z2i QXw$l4GU2˵Q]+W!XG4i:49 /?__3-N)&&lxLytbw#dP᭻ [q] qP*78YOFW :GD*zBVnA/ */4"_OS}MLjY_ ʵ1eAUz2R^JQzYM(jYMv{9gXA XSb=ұ[bT.Xd*lH;SHQ$v.g1>rkab&7g/d6= }MfI(wa )I2ѩ3 *gtC$#Đ ›ld1֪"RmUT8Nm*Ho'ڰU!<;d{ B/wqiIڂgL< ,` H( CMS_h Y`G"yT;/-T{ޫEHSYn2߷byPSL-ʱ5D&iND)iTsLyr?_o0Ysk ,rY}efҌA* 4z25Zx_(br*eJQ1U .b $s+B~+= aWʮrѬ=aVCVf$\qY'bdJi)d8X[Y*34gnzυ"a_g?oO-+,Ď4W(ju1XXN;X%,^\fݢ'jݺАo\EctJ*1Xe@+ɎwJBQ- ]ڈ kMa.9fjzcpy}~ =}Ԝ; {+SihMS&'4>W ZIzc-<Ăϒ$Rs>^%kǢ=ȷp[;K?>U =U U_u_jk,O3 SF#ZΟZC!-Zl.| ?-Fs_0];D# LKtYtب-#|G9T;fVKN.5!7 n!2R;1ސXkLAy[e1j yl\eD *'%W<<|~1bcQ=d&]5G9*.[lVy=Ïf~/Vd:`u_7-"Wfzj_=B/x՚, F(rx2/@ AoOcD:cP#4 `kN KYE83IFS.D.1aR04q _ $n  $zI5(]4b-{s)5LHlj䙰Hi JXhyPЖѱ _}w7_Sxh0 ~:vtVLxtVQ`],>5?wrMfy>hbN^WW} r5dqYtAtkbVfv -'dr]U-I?&X2Mf MSHR%SE rƥTYNei-O1sUQJcF`-U kZXsMn?Tob´s;`PɚM*ȷDR^jI/o̭*UaE@Kw~m<9{~Wh*NMfHcD@y;wS.W&XLnoMQLF\Lw*fjXο&J X ;T =%p(T"E~ 5"ѝ)3jb {=mDu DXR,;HM (BdL9vhU$cCNF\fw{ TgWQHZD# 7aGE4 -|6Z5xG1f7 vP{ ^HGif\'C*ׁePq7@ -~?{F+V☻ڥuvT'PswwaJ{=~f}TL٫3_zF5]Vړ";`P~w4#T9 Ƅ2'&|v +,jޫjfRx&Ljp*0K)G؛u=upkrBK<,Ze˕@B 'feAbY[ 2RiFj`e_L4ɤughr%$!NBH C$+4ZŊ#At'grJ]O,XCʎU:oM8$K'#XOr99 3O[LȞdvxݗ:\'qp]iONDU~-?Ճ}%=|]Cn+\A'xp?7 |'+U]EAp Z)sYӥw0^2+QBݕR`lTZ _aꬲ9jlD:*1X*#*k'f)g92Ys]We"QulN݈ኒS.vkypF4:X="+I$xYiu\ *µO@{%L!bqVT/_}ݢ<]ܘ/ |Md:_m{5'BpNT kY )d$/3˫!^NoDD[=-03*PW쑙\ ji @^W B8IrAa-bDdDJVoB5ūW9(jK0 +I\)sE )a3_7 :TIY9ta,ϓ?y 6G@",BJԭ~L,hAŖOq᥏ /}\xj\^DRP,e8˰!!D`  xVyZy21SL|y_wXx"ʧٺ}}! C B̯JxD,9Y~v4~ykf~ YNZ" }6kg<&{?x>iz}B{#$\u<}y®ɓɓsDuy>at]@aGG+Z;,ޟ`h4o:F WAay"Ge->/ث' )R#lكIRw}ov;Ǔ&>"O_p$0((S̈́  5 ۢ]ྖQ2sǡ[W.S8*ZɞlR (*E-b\@3hZz4h.f:ͯ~[Qjcm{zҽ]9yEsxY\3*Gra÷lrLyo,!=xY,sSV&caPv͇Aɋ3Ybԯ7.0>$tl,60fv+ҟ(y1:br 6%yVQ,KlP}p=xVsZN<F 0hߪOƆ{qj`$UZRvRI蔯4Ƈu/zyʤdD=,+d?@#cIq9 ̦e6B?O>), ΁9֥~ʦlE;mX^LCxT/1rPB<գ aLNbzRz|1DYPlpN@9tdA\pdȜS^tRRpʇHƿ1Z?m=N4AN#,+`vqcx1][o[ɑ+79R/8b3٧F_eN(RKR,߷ÛC[3FꮪJ igV,Kn|q,1H4|NU;˫N FωAۅ#.G*Tޮ>1 /L_sl4`ʆAŒ_RRKT,U&eTD"P(qxO1/5n ;RY)H-|;#͜b q $$xGRkDFԋc%,xC< 8 #8Uϼ4A/f B뉉AU-6 ZXƛz=JKkʈpZ_8o#5D*!4a.hX"&|ou>]VxdW9P<ܭh2d^݄%x@Bυ%ze $Rd5Y΢Ȩ<м ‚E aitEBIAG 9n TDPDmj^#r l:[ح_\e yH]İn,GNVbg`$ێ} 1x"]ZS%Wk:#{ݝ~z-V B?y+91+C̜~ 8~vpT{0bqWa0t!OoPk%ar5JKOà0(6~z4c![}ZR4GH^~cŅn0.{~gTS%bk~?Ԝ ܜKh7`g[NE5Vja =i8)zd{2K{uSvxN c: 0-Vo#= ])}4GRT#\)ORԉNE^-lQ]Lhy-=}4UQ9 bN6I"G͋7@ Ɩ?ܿ~?w rͥJ}۫ސhe%?he=^e-F[U*h &QmI_!ywE%-hUo|T &@tV8gp&gX29ץgXf;^Pw{|=ٺ& ӿnrR70Gi/%NG7o71tEq( T&x?ERQW@~<_V{),mw̄-ޭ;L$FBrX\#B1f:ȹ8~rFSJBLV\ c9@!C.8 jI}ƙqԝ٘?N,Rs .wMY]@[z;P7QMM4DL%R;ό#Niq;oRwٟ>?e m6Y>K[3v0虜][_}mIIW8+ӽkm]TѨtXŗT{WrWACR)-RA^%hovϲzobڿإe%pk[ p? NAPo:z9tRkFbHn{_W*$TX^O|/^A[Dܜf)J#xyQAH8ai#fRds w`UE08(τ,TM X%$*^ _D0avj+ 3(KrXG,r$J` ("(q "oym.4E0CTeR(-3"j爄HY" *.)&\i2v5%1XhF) S~DhrևBVYs\"C%5Zl&2鸠Qp& NxluM6B=cfXjE0-o扟SU+5>6OyZTz)..Mhq䴃Lc}d \D}෸X!n^L&D:Bx#tĻS_p~ؗIR {x|onWF[OYzf]WM7-q,/}_ i&%9eMW==#JRJ*7l^z'*HhH&!By4С(:6Q -%GQm soO(md?e+A Q¡ Ϳ@9;g5F;ahYcG҆o%}Qth%P*jk{y,o'F2W]8 V tü/xL^]|v?ٔZԴMHW+Çt W+k7~Dei;f 8#fFVo!q@Z-`$[mhk!,UyhAuOj)"R=bƇT5FajtB -cibm]x3q"w+rĉ\U:zXGB+ )֓NCkXI0F×mL}Vx=۫f~3ۛWߤf\l)47Ʃt嬒.Q(.\}Җ7RJ.=\U|]/UALS.s~/@p7s>b{:%vF2yFR%|'lٻ sW SH4PmZEj R-B6-^cڭ@fXRjw2Tqm ЅdOHR˨DPKj=)CC ˱B2ɬ<j)DCa@Qr1)c f`F(d1G /"3*G "E*!&XiwZ5);敋WVZ6 B RŜ|׍Dg!fzP3 9q%.8`6C(3lԽ>zTEZ"4,H>$\m1T. 
1d5RªcRV5D?A0 |ߖI8܆(k1ZiRM1WOFOIOj*i=,@ާ ;bBjir!nઘh,G "[oVk8)IԕUgpջ !}pSJ8hJ {M ħrjL38$1#3$ (vv˚ys.Qh$X0NPK!J䈁bLjXRySR@C$H".8vE#WLZKCB* \dTWMR\1SB:4鬃bW)J/1Lx(5V[@ V< 9 `DkLΩEgSFgQl!Zj]b6R HXu!m=2e#E`Kecpy@UtfU𱦾ZrN CToQV{~W_N_{g 9!L~c\|5HOt{@]s8WTr3++uSw}V7~$kPEK $EcQ`׍Fb3\hPaxr}ºYIjf{<=cB0 ݀co"iBd5|u~_}F(-7mp϶p6.}t8J8*G#*(sx"/m.,ܘ"\"ВF Հ_IqA1xR\i"JAIWS;g(M f1R^)ȸn" " 8mo'Aܲ|h`8(aBלzOڄB?} od|;w[zwz*,,˵%#ĥaȉlY"DK Zp̀Jf4mrNsʧtVYQzi6*/@RW>4K+ܦNOPD5Zc샮@r=[%^Z4R1զ};/JS"W~\KIX;(dwShuַDky;u+F>ӞɖvOURGE4Ije}SGi7[*!6 Ow%6vKݺg.I2%8,&cnN;bۀ<.OL%O4Tu!!\D[˔D[#+WXcݽBF+jt;5}aM>d}ׁ3MУygTn/5o- lLE@DkxAywJor+~)RGARi(͝qU w/NA-e ƿs3p}0|m(֨LY }e$U(1hXYk"j<ҸRu5G;}FTw0#7oߧ)C]!|\cL^qBȩ?[j$$ tqRՒt]tKvG?%n TU|mCj^Xf^ x%mi LH0 XzزD mm|=LaиQU!mni!K'zSB %x@"Q`]M"c 'IP# 1Tx@S\МL<•<ֹb@DQra! 0>u3 *EGiY#$9=BКG xzjHU{%AjDw;"R4{@OoE6MJMۛۏUi KevZE0y]˰)R`BFir9uPyimlݕkZ\ey ^7DUX@~+ I Il%c@ﲀ߇b9)a5ˀjB u:I~#XѪcN7kY'1$y7| lQ0UwNbJ?hsplJbQ`I)첄NYVSɊ\IbGI;v H~+0=/ l|u7!6Zgq=^ ُ"$n=ZۑϤֵ3y&E-l0뤗/I 砛 (kgZ|MsG x(w^#4&?Ei\Mx/_~s;WU'N7ԝ.:JPT#6Sʈ{2]^۟P - %{S-9fTD.ј?}s__W~ZP9F:ͤX=wS,,{,_.a~ZHЏ ½1'zwGI2햊A褾#F.6*ӷvKݺg.I2U7]5G[*!v tS-njڭ y""Scuj7j}1j*LJs!Opҫl=@B)N Ч|+T7pw5~Hי`/X_[P oט}mDWJ/~GP {xqZo/ {pd9Yݨ9{_.iR*JAecBռ`ݠLZ*z7piTRrdC s},2x9RA85s A(eG%fs`!P})H$hVf`yQ(!U1g皖YNRseǩ npk>OIڛ27g/'JdzMK6?8Oׯ>rTtm 3XATZBIC 2e4+ʒ\RqRPZ] ɘ(lzGT4jr*,5#>LFXIE:2W2SdpT(fD*ǭ%b6DKu1R.°29( %QKB\[K5ɌH2@3-3-lMtpЩѨP4/ 4Τf,qJ%6+Uib)WL9u_0G>AP2,0W_-/.ŭOGtL`.(';=B-C&@3#p-LgeY:/Q\é J:0e,34 *d-̿_\<(°gO̖|-A?yxWɻjbTzI[a1iF6Yd;DV2~PTdC k6P?P(Ay#EXÇ^D]ݳf'b]q2b棯cre5[aK7Uŭչu]f=iKwwxMgxPg'"j7(v"-7Oar<_ɯ|sf|\ϿV1#Rͷ'D%hhq2˟צ# ݚ)Ŷ|"mSMIH)!nS[Hon;` w\2PGe?<k 0AGpHrl<LFW#8j@55ײ p g8|BEŭɻ~FP้ih,ih Bt~P+Wpo{{uJp11N0dF 3oAsenSLS+<'Rs0B͵c\ZJGԶ, @N|:Wz&tr~obL&>iHVS{)+N}u(=8CAF 8Wcx׻なx8qePut.q |m2k;h?f $@/.-㚏ԤԪt='hMH/q4s0\U\>M|[Hp.Dw/s@<=}c/_ 8º?Ms4S t;<}|{!/iLzy"s7cďF1qq=(?xx t*6/: N<ٛhbXA!!`؊ ='%b, yJF 鹘v 0[֮YRf4h*˜LRጳI%VQ. ˍ&p IKDZˇѯ'["yw]wIq?S/ϧ'h/6O7lY9/98vn".#qߍL7j>V-[OV^袔N3‹)OsW˝NI +NZ:RJgޫvkp.mGE I r$ta0R ĵ\nH4ڶ du;aqc3P)#M\TQ?{m@/Ikr @ֈŁ@vlݬv.\ӗ,EVEua#7*20T 5Kxk%Ӗ[IR1FD& keʏ;dPcgN*/CXy~n]rSV̾&g_YorAz!"Pcc8}#եQlLE'>Gf2S<˙ I>e$E#yw]TRp`v 5 V54֥5/i)͈ /:Jl $yʽcTmm逕G}' m ԣ'3We=M.75jwG\qV??`Bk8M?R?=1z2>#j.7R0?,؛hɹ#[]Vh! `p#W.+G̟rDjns ޑZ[λO@SW{EĴ[1b++[YKp23 XhqprOC2rOMe ^ UVoBaAm.Y$Gcvywqjv3k+Q@iR SmnGu[S-uݷYٖ`=Ւq"Q<+OԶji&bpEK פvv.KTYI/m"j:e;_BH~YsiBKU WbL而tSsִ.<3bpDى5`7ibo~ i@Mŵ]FZkdou RA{ *-sM[lM WNfX%=^"#'d8#7U/ ЭoZVpK3vbW7ep+=#Rb,'vKFG 5Jm,P_=VT|2̭̒"p7[\DZY@Tk.ߔYuzzle^.H`K諆WZA[ZMצ%xoZTW<$ tǨymX@5owkXRV v^(i1c:}"Vz1dܼN ߞGprw=>Ic0>ݞ~8zݼ;?~$+v:!3JK8S4! 
a**l"NTg]y%!hKQ:i1"Tm`Y4*bI1MGEfg*Um@r*"02*)V_7޻5ȍZ$5jukh XRhM^35BrFߚ5Bv| n'$f hB;f(uTveYWm2酛Z 1Klq·M|W[ehjJTQ 5뾃V֮6k_hX~ .bE+sZhtqFMk(;-}Ӓ2'Mu׼x-u c+H=9FHmTcJoZ5.;N2t+d ZZ`Pxg#>,uQm䌽t4<5cch~=3,߇LLmG΋Ȥ!Xd3R JjPx3jƓ܌zMqeZiBd_E8QdU݈7g|g_Y{cEڍ)Az=ֶ\) @ Ϥ] Sy-]yb=9y4r(e@x^:B0K5< [Ah[VGH^T%+|p/>y#G td?/>hĮC*f=v@oV`pmŸuڎZ^b*2Fz:/P,#B)ڭ&@"MrY EɴQo>\iwpfi_t PKp(̋kɟIaݖ͚S+l۽K&ꏧgexg6DbO CSԔ$5:IMNMG(oή !VBZ"!B1+pRD,}% si h@>+dz*0?i;n: '$H4Ut>F+ngsNnOCmYe[ca )ۇsK)!S`J+p踥GA9F;_Gk-(ԊU Q Õth-i)')-$zZJ]<:M*Z4"X(yJB#{!B{R¥-NJ<|a CT$U(8I N 냕!J ]V2V^"gJAFeEHf1^m'j>N>Ddnj8`IqD; OJX(-.)*_F>r[Jie._olyJD}6zruԀf FC/X+Z:T_H@*%I d颖;kKW:im X߃wCB4i_t[k?.,QpmVIcgP;U(m!r- zK#%Vغ7cM:?'WhĨ򘴾r)~mpês.iWqW):0s,*!+"6(*p 4 ot7 <DbE`9,xíAn tkP[~kqXyɝC邎.VZY^I.{Q (,[gak{M>Z\v _͇]3q 0"CSe9z99Y"W{2$dN )@’~HOl:^nXtJ8#A *YD2iTk+i5k0>SJO[ p8I}V3]Iu7W}:zz0>#DΆ&>"5"g`Eo&fz{ωωGƠ}MϾs:<=Xh#NhVȟzrLILdUicaJfљ]'a!}(W":i#e`XF2VAI 䁵B9.JgFᛣ*z(wG2̘Y-kRGbĒWi"JJjOs[fQJ kwA>oejFG Bʰs& ^%ы4m rfK.[BbF>i[KPKH{ I$2nWYlq\6RR0Kf.!K+Q~@$BE -'-EIT87"T^Tʓ< )J)Sl+iÐp׊iEY3hA ;<2@cqOfAl}߷M,үL,ud ǂ(xp Gi>JcxSA>AnJBXYJa,@jZ<xS˴~tKk/+}ڦƅ :)bIusRlݑi-ۿ< Keԓ7\[^zˋ" r j׼ǿBZ>nz 1bk iu~9F eC,碼^]G-ȄmgǫzXp3PY|#{_./XE.Y'|VÝѓ-8J'ƾ9nCGgqqD,)s\,\Tְţ;` gaʳLi<ϭoz(VpQ?zQfMu,Pƈt]!͜jytH[`Vmj}f*̳}w\^UoR'YsafLvj,1׻m?ΟYu֬s9ۏQ@ǖpܽBûperHS3wPFlI/uzDA ;s'ِT?N,\;RUٻ6r,W`d~ F^43g^a,cy$9}K\JvȺ<$-W~\sicq.F1193QPhSs0Z+Nyr(፱qrkDc*A F WBiiDXm^J]=, !e[Q++4qg BPqCv! 8ɷ.\ܽш)Rs]f)7q~n&|ǗI>tE2֫X.-E KIuTkb/RH˱ È (P[Wi$ TemxYyFԀ< B #$QjNVk.L"01ƏYQ(9/CU)4ҝ1J/}Z.OVNh-1C>hNT>Eu;[^kWƐOԹUٰ1}@CZ a@Ƹs (xDՂn|Ih@JϭL]g4X<7THH2V X@ ±:/Ĩ(vk̈7|1<^2qtE~(;WBSDM+K&lsJ%T\B oCCH i):6" dpb|Wr4 +Е[p'jL0I1%XS gT!3`hA!r@M+VPp81:GX Ä*g ~=瀗,ټ +=aofW N|+AUO{b5 Rv::s PᲵu)qjcs2jc:C*tG涶>ض&Xg2 k}$XK@N((@-!aO:֌JȠ+Tk^'H)JIo29U,@ʀY!*zD5ݟ+RX%akPσ @RL &!/ՁHTT:0{d5QKKJ:A\8ini>FH;ً3YJ1,u6ƴ3J(oki7=*@)#չm-^7`n=MS v[;;H#A'91dDc=uN2Jr0huinD%B޺= pNp! kH)dr-xrBCNK΍g`H%@QtsdS5; $lsq\T+b*VvƴP 5@C4O1Mg[?\ҐLr/p>ch$RJ4 j7D&'s`!WI*Y&X>-#Î󺨽C?H2l[:AO]dR$Sy֋r07UP^2.v.X*>pR=ri1>qxc#tgqm%Rp FZ FG#%eXrҝMED˸'H4y@Dwh4ݵ]=;C[~f9f@)"#jPfkJ2vw 7;F v|Gׅ͒'օ>||S%6(jl__/>;տG樯mmCXdoJ>wT`2e(Yf$F `$;MxoQ4 q+TK_=Nc4 -F^xmgBzz heQmQ'DI굵_8̽׀ŽX|, l 8cX^"ZW%R2 4yXm#?Ư'Pyм=tmYҨDt7>-_,ߞErc~ .@uT,o()/g+.1 ,?\99 V=1e̹J1cJ}S`-!Fҿv7nmH=GC^1yY)SȪdz<1e%[x~}`~%JkPwwܚՎgm }ZB4lvW[q]HgQw>~AAsٹkYQm6-GW֒)q" 6JK4Kd0+g"ld _ oE&qZ ߪp1Gp %/cHXeAre! 
)ZmUBWE9ST_'YwKcW?w 0}lxvk5A,X5&ϗm!ߚK R]BU ~w`칒J|̩(J8[8fCʣcַjQ:t@V[k]?6D #w.g64}>2gRΖjPt+YF;V/fr8SoCeuJW+/ukh-lkP;> h%=+Q #+W(pM)5=FY%v@햊AI}G6l[C{n׺ [ y"$SwۣN~v>N`SëMx vZ]_sSItԇZ&UuoZVjF{k/jD1=W*ɿ]tau!ջo5JuˢX5@O63һ3w&33rեv)#jkA_ounW[ iuҘB͔Ѡ*81؃W8]l;΅U&+6˛4;.Vu5+NG?в+jB7r.(®,_Ny#^xljCڗ6 5ƌ;Ak__}V^7j)g`чZW *)3Qv'«^UY#D!< &3A-s5eӎVplIQzV_|WpJQW\N ztphf_c1T;q'< w>{ YsJ4G" xujk4}3I59VWGTDR4bôhf )I1n/?fF#3/;;<!:#9S82u)* =ucJ hx>[}&k8JH\]c&Gn5@FOɑZS#Ė0ilYs -gA;s M%_ٿo[hwrG8.~b>V;^\s_ڧ7&3,8g=xɑƗ=LlC=5v ({6nSय!-œzylzxq-8DX9Oz2OwR,Fvûo=ޑ#7xrņExLƛؠn2}*Zpuywf3W_n&]ߝx-r/Y 9OV߬SoKQS{;;a|"ad靮Қ.ajDzoz9@cٞh^ =;| 㣏?:L{gUW@_޷k+v]8̹X_'o-of>G!=Aǥg=_܀oZX<)|~/ceL~lJ $XDD "3Fq*  HCD+c Ǜɇ5Ci7ulN̘>O~u%bVi[MT'n:Z{im}@^kp9 ;Xarp(BY3"A)G*GKd 㜴8ӞPkQWi$x:oRU㏫[G6$6k}`Q=9Ѥ9YOZ UA}qV[{E'"ur?װy(TvTf 4& ƆI064p9:VTʨ/wo$$VAq#4&u;[^kW e_WJAWtpchq0ȥ٨`V2gd42lU3:A궥S^7ޭ[<[ >X& lL3>Rwv ~ RvMCTb$>rjMRg2!4E) 8LTQ |Fg*xM[.KG$6zkG4»x6{ zcz։1^n^̗-CgGצUE3f8&TX 9(|J,RoVu7e]%{ g<+Y)yQNApPPS$I~DZ/ƩZv *Tl4%tpJhk; 3Fxk$F!=4yP(*pzJ|I@U)j5eCyK,ր=߹U1x./|f7v.1%_~S~#+ć3(^/t_5yD eE Z)pNU~7c:;6u'l4\e呐.I28 h7I*n4wn=tO%Ei6n9$䅋hL@Mvs@햊AI}G6\1T ֛vK^hSCB^&֣nC%bHtJ [yy-USAB^&TԻv+9 G2Ņ#^*)bx *yk"+^HJqT;:,"pQ["!c'\S CSAڄ#zp %] QOW,/h0ӈycu ,9%CZkXՈ`A#8j?-5L`R,i'\7IA/xnb3hٗ^C?P?[KSZJ{>Uq=GU։mȿ> [ʷ`J6Q$:zLxpժ#DGe,ZaUw)I-.v@CVIџ!;"3h;ОɩQKPqq V{ի7L2pS `*ٔG$|BZ!5.d,ENe 3 8v Y- >\qP޲ ]g%xin/i(|rP$d%֕5UhHX }<8 I7qI;VtJG+*YWq*]%J\$6UfUۏ_#؆جѧM-`r)o/92'nc)C'Dnv۫c#Օ9ZnrGxQhu=넖OJ}1 ʥtzo-z%@\9*MYH0w%J*F|jzvR[E/lr5S-+Q?ʥ\ BO*yD (ǖʡFAR:> D)ӵ;MSEB}SfL9`vy?]_0rw♒o֖rYs\]5B g !n>3A9t0e*w "ǽ9\ۻr_rC\k,M!|coo~mi "]-6%vuxa:;q4 Nj6;P9iun/e*guJO#>d?GU%UTo3:ꝏM-a`qi'=CG&F;X6n~e><{s}rÏ9o S?P<aAgjQ Src#> +.1lIB9qxRFu 7 !I9L{|GrOe%Tʹp* m'ViA^$jh**pW,.dp1C"1-UāVfR)낚']@RDdp)fIH"%"rs&`Ao5Fx'g+I6R%\r%q,.e(*H@Åb9uB ȉ wO@e1_ "1(킈H Ņ-MJa-h^yk`?%o /8a1. C!s8Hd);eSn&I9?g*BsZSG*AKzʬ1PI O%OSyN:ഝ*"n'5D \EQgS M}L gBcx2磳˝9 1Gy}wX*{ʊ+@hs _5pC`ZFk]N9uE0NDŽF3 hAs tw쎐y_t:=½c#{MYx1C"x>8fy?uʷ-R S auWR=շM Hě hZpUliJqPKH0qHHmNQ:ܝy|Z9Gno&b/ 8wN@js9OCR97RPj5ײ. N;v[ĔZ0c:p1El{tB1AcNYeVNEGqnA!&DWap`q²=ZP3zJk9{u J gh=3SKQЊwo+ 9bH@iPL(?sPp/vwrpFk"$d.Q8}ٜ߹׷l QFy~^G:2ʨ/= .*lq%=JE4P*aCB5sM zPγ49'zbaii"9 Vp:gwEY4<$7LJyqhӅEtZ_HPvA@߁+u)h ZnQt JT+>@ñ ~;ςQhL^XrR+%ETPk =C/Zu*Q_w5UJ5W oWKpmxoCD o>x}7lKղR@~A;F9Mr~SA'b,&<L&q1t&aZ'V"rHN!=D68 ?ʯ:V!Ak;o*~| U0T%Ejf75Nv3q8o(iEQAh ސٽЄݛ'("KI[W%<%$-Wr=2'PbzUXAd^s0R7NQZE9=E3ӽg]ia4LdK^,bH0f=!^_xu{{.f%w$+bSEͷclZ*#ТW }Lr1\㺀!Ŕ1N/hl)" *'VW:@5F,>\9y7,z}.q<-ȣr!׎ѐ֣p0,G2R(][A&n<J׆Z9-j:&톉]mAB ĘcFv PI$XŎ>{Y9( ;  7un7JRb_f>9HK~73J/+0IGk@\$R&3.yA(rJirrs, .` -M`5cC1˃6c/!p0H`2"+fM>rdVG-JRIg]0>9j=N|qFգ'7~Õ53!K]˞]-hhaR*lQBh;g2\o+A@t1S.V ~&23m ad.˥(1;J^nnW<롐S<&ŒZ"|RƠ[:򵰧82F@g@az5,i=}&킶Їg&;_wm:NB2_ȪIM#tcOv,q^a 1|qpq&򵽬NC֬p~lM >b ?- =D ceDeRXse}nϡ>H ->IMbK` oa,!ҋhסplDz+Iy`s5 H#?s_AqU[/d"[NSAf(j_=YtP}P )%L 4Fq_.SIyjb́+Y+9Hњ4#2\DZy~aɥŽNao/EEsr{r3xjxZ^ڻOoN w<_ p>|PZsj{k>գł}1 eUIٌYU+kr0ł/t3 -ꕽ-C{_cVUl@Y՝Ul@Q؀<þ$Ԣ{#6-!J4x28xzt=^"[QTs& B(uB(z=BQ. %G3vn/Ej }Lrx: m (>4֜RGtpdQwGS,7 eao.)gqv(hRrtEwWUGf(/q$~^(R3x5epvNWvva;Ϲ:Gע 8愶ɥ%\- .;;u#Zh*S1L)3YI1TfJRB^ ֨t01 }5 ("s /o}Q]$ւ^ *長Z¡A7w]Iﲖ3QD1k|֞eC6n|X}Gw]ÀCvugF˝xg␝Ha/"R ܹ(DקKVZ IFNTt]J_r fz΀3q%ƪD"GL=Ѣ:¢J!1!hz-7nEPw7naF9f;uoJ BD Ɗ@q|EpnHihYGQhX+D[D~<o6|,7^HČF2Fʼnu/'rR1" \-w)Wh5ZƕĀxz9 (磇._~', 4ו)TN+knFcT:B/-o8bvgf_@:<ݳ7Q%Xj("J3 |oӍA{_AK?˄2!<Ԅ&/9)Ў W5؁~݉X+ Pbm :ĭ6Hz~50hҡ_s]&;"H"ߙRS>qzP=Z8%: 3kP8߽Ie1L|1-'>"͠UO zIW#\@g&yIBx=z 9a!L*(;<]}(%/W/}=@I.d4QB9zU9<=bv:wUVqꈫ "_j`K޵5^fWy(?>M%LkKDژhk|OeAhuPLjRLeSJNe>d,9W*C9@ +RA,I.8Y 1jxay];^ ʹ;z)切Ж{FR:7$3 +A^j$W6- ֥;/Zm PQ.Pore:9785Tf҂'Ѡɨdg.ވzD~U\j9rz]+yfqYn2!m!@C:#.#N\bTJ"[-qU̺4dX:?d} d^t8Ս'4CMD 9!,! 
= [+1 l5ܢLVAJ^Ձ\VXϲI|T,=X4ʸǘR>a0?̈J39*3oǝ?zؔ6G̡¦ -7a'X0aD"gg$V$hhꬻ0|al<^swy:" סuhrpfZ4]) }nP!FfQ.R q9r;WvmUʉIvg3&n*7o?(X'&IQe:l9 9TMGPO0R e>HqbY."7BIsZ5*x)S%kPmTyurjWe*|ZkQoL(a$e@L.&c_Lw[E{nෳrz䋋⃺XU}ŏ7Yob%ooby밪:f>x F?>n~>ek\?(ŕGW\ԜlHBq#SkRz&Gnu1Hݎig53VAS[h-zLlN#Ԕmo{ 9O6<ψt]7.e@I1dy/\sJ[f!w#_7Z \@&Rn Ax ~eb5r_ =ejr_?S(6D(](ֲ5,&´Ac#y'vЄb7@-x0_# y԰]_ۛ|y(JI&ΗR*ݰX*!Z1CuR6Ur?Lm+2Da<~#@ 3??Uph١D9?¨ !mvhx6k% jv C ^'`s9pFZQ ׊s'&(=$A@<>pLaNrod1V^N^rŵQEM[+{g!^&J>{77v| w:lZP0r_C_V^{ Dwݛ+S ,&E_1j}[¥ H2_Ӓ%7A#or8i?\gpY5 G^ۯ1F&I˫Wlh"tC0ORj(!iI'Q>ypqSMn4 8x+A*WVF2APV\bDL+Υ`\0A慣sTj @Ns>c06JqE#nW<) \6^e2os.SUӫg*vW@zn̮ q ْÅFb˽z5XlQ1OY<L\^=~yz?="=~I/?<~V`Z ~~sFwx!;zPg8&r^<.[2,ontra /{>E{ jşrdg/vJ je n`bk%3XXNmX"Q(B1IO2A  3z?df3t,J2O }c9 ,dKUJ sBXnԚ\x=Sp#+PX+|8Y-@B b@(w@#Ņ0@ɬgxN=3, s=}B *ż䎜9+fwR+Wu.otV- }xO$p'rNwP*c6ޝLs mZ6Ӟr&j/>8qQyg@,O ('Qbjmf5`Ksloqf ܫ?MWFpqÞƖ*ewi7t+2e(At ?jbAPJv9]Tb>]Ap`P1d{dVYT(Sdߢ4ً54Z#)ɔ##5lg:U"j@`0It.kX(sFrŤ,H%QZ4L>ŨQg߂R݅5q[ʂXEd*]).(yHq!&>}{xH9CSdRW2i!d9eTPZkP% çMQV;PȀXIХc5Y)$>ׂfFII@պJ'ȗ ·EAv> V x_oc|wⷳBXQvv>`E;?pNq՗CLqQay'/v]Lԝ c5ͯVUwk ZRgY.8`vjVꔛn['JI |O]ƮXx.{~{m=Hq`Y'y7ɢm4k'V}bv?0DK|iWB>vvZZs\j QK`$/N-(?*br քB&<"iKB~ la擀|6fhqg9>$%j x}+ޡ-4GsPϡ) ΞQt>pH-/~7Bx4?&<_ F*CE[iIQ@BF(L?sVixMLN:%[iKQ:B>Bo/EAÔ0R!.ɹien@ũ 2 ѵ}fTsswβ7SCLRgp\,9Uy\{Aa֥G T7ٲ[- ԚN=61DOkՇlj`tn nT vӇ$VF&TpZ^Zp.tXP#jMb`\vB[ 5R!?S0LӆLy* $:C<:xꈣ?fhŻu#>=ĺR\XDHIQtz*ls\nR#{ƍW<xcB^f![%7=b7s+Mɭ*\݅4x#g_QdtzgbM2bʎR((cqgHkLXx9*gw: RQoeAƗh 3wa6:٘O<1ןq™Yk,)cD%x+ 4# p[})zhC374eCi2<8ЕsǐR7eНգ;*V,Mz+/t =I@?+̅uODwdDeØQma[_W`BLoi)@G}iw6:Bh9-W?r˧פ`ƜA-Jz4#JݨܙK'2馴͙D$0>'A&R 3Bq74+9]M o..էZ٫B bEG~wgӯ|z>N0n/$<b*b_ pnXp ..?/C?&ze=*X#np5׃[0@@ͤ~6)J+vؖ8SbULQ) Wn8nCƺ]7ӇI,6^!>+ETg?G[go˄t:/4'}Ogʚ9_AZM֑uM@MQ ֢H'C>x; PBaE Z HZ Éϻdo(TpVBpJ 'Dh{΂RtQ pyx`$h&'Ybl(9H @crGgN\y`V#9Z& $c\K {e"Ƣgq2 FPmrS8KXvS',MjSa֞xZgўS'J 1E"_ҀG*(>Bx# &9Ύ$c$5! :iI#:W Ι'689H;Ȑl V ːhWj,>Dh'dc6r8ySS`v\TiT ƕ_lR@~-Eg 0, ࠩk*wH Ks'LGb;dz@9*C/g<_߼7! žzК=;#1迾®VȎCBIszj$哏l:%>^^Rt _Z9[Ўq6Ŋ|PŊ|P{XT93 tYVPFV-cĐc #!^SwT@Zzh=5n55_nQ&UbjB־ñ־MŁ^tXnuwHڷ 2vvP߬i#p4}ѽ ۶{DJ{cHӂ$lnW'KrLjɍ_M1Fg#0 s??x@iR./?ouooQ>6HajDu@n Z iQDZ⃉FjErIɈ)a[a:%AXPڀ w'/kN-^e`Hk1uL;mJS]z/x+րcSm˛ϥw˰V\ŵl]ܜ-"Gpŭ_w͞Y) =K7wCM׭Gk -WU󏳇m߱Is{\n9.-z{'N縪rIe C}!OShrGm?jw@g)]6oVwmWl >9qRܺ%빯Oqv^2.ѻIߓژ>q*Jʫ4sX~-WT C8~G5UwHtj.)"8OyEZ iҳxIZK՝4B$ansmU>/A"Q/OGѠ :߮iƤu#qgb|t&IkbʞwL=\0=aA\9|Xv->Q()29Q qd0cSI/$`)-,g4EpA: xx= ^ *||h*6b=&oõgՀV2uO>}}xV1pV۫QW-ydNpc^/NoM͎Rt)׿"y_!)9!J ձ?t85w _#ю§o>-G<-`d3Y@~c\vճSzؼ}'9=cC A FHF<`UotXAez]?e1q׽mR8"b}r䴡y֛.631iתW%?yxK X+|PDvןˈz7$ >$ 4 He5^N๪ѵKaL%x\kebY޵:wy.p&]8pWRobZ]āˋP030[tf:ZͿ@9Tp U/ 7PH7Z&pD,%}~N9fBnٻg_㬇0˗8탱E UC}~QwNuRjFL`EBdJ%jAK*B2=S&BȤѓri-2028D? '?|}*NE,Ӟ1ɂТZ$; Նߕ0& zOw.rMp_wn60pw}3hCDK+ʘ ~+w㎇bjLQKr#0GDW~:lj g KnW_pqcVvTm幣iSCeroMz>>S}K)f\Ce#nGƭ1˭}15EbvlAF@́a{Uŝm!}i\у3p7]OQk=fk߳3ȼ+Ϻ=rKo%{^Ѫ="x6{A:&c|}1tps`1.n{;v}Ri̎Q oOUyMDp*% qWHB$!@)&H$$MD xrf>q6PC=~'EQ>pl|$[N~p1B<-у=|:z^lV:zV(6 @7'bwZvcC(΁H01E#€JQ3D2 0Cy>冣=OV ƌ1TӨ{ xmVLMXVH03XVSIP hY4э88ʣ 9jtw#[=j.he / 8@I%vq q@O*:g(J5#8 *єmGź||:,M|WdIé9CH~|J렍Ax7 mZv,q,TRal8gW_*}}HtrjN+gdNWz.V0vy^v)ɑ9\4l}5f'.`l*g?VR`x˔4piu(͜n)Mwb hB{.:1T57\"=%.'ݞB~,mfiqsh>bQl{w^վs`mdLeu & !%(]U5Lʪb^fϸXWCEE>. 
Jc-yq{z7ŇwwP^ߟ*@ΞrrpRsppuٳ&]Muq,hM+߮o^>^PܒM1 iBce̝3|A8^GQy= ̕uTK#/d}om36`#ׂ|Uj8=X }5`fa~X.z`=~O}ى!ұ5tlاh-DMrNfTeǨ!6&'sdH :!~'PKo۷s FӞVt6hTucizy¾s')DJRI5Ig B*t^وK>̱S ˰9Mjb̀_z W0#*a}x;BMF`c_֋6cDpmr zdy[72DRpaf pu]Z 0PJWpWz6(@ FMǺ΋6!cVrqr8ddA!zMX(RT~ezڧ-27Ąrygz#9_!bjEޙb HЋ "O6/3lVut5g.fF}ǠlH~_loz8A cN{CR꫐:oDZM, *+Ѐ,\Z TY.\8Г[4!m4TU/i@ Rc 2@5Ausi*IJ0`ɒǙ5B11"SN8'|ʄqt5n¯jZb1ag|5no}z%Q׸)<=+&ΠG7\:1DPh"5}nEx[ʃV/?7) (|Ke-GѥLl3 T꫋B/ a>ޚ26ue|e9ASߜ&i5ZoTDKM)NW;l6#FS8('U  jFY顪%#쫚qZy՚".ՒbMsb.wO7>~\bB~R/r{*U۫.G@bT2HkAD%{J00Vz";OAqk?0Px~wnZ밋0=?eP7y1I?̮/nBɏ^JKb=43pv:(g . v4%8D@FbnTˉ:qUkm `Y{k(-Y{ɦeOZ}-eh1yM&WW#d6 {eo%7CZ 5XO o`Ig1CYhZ~VbQ؈ ]EÖa(CՠVYeV29]UhEy](gn2:[Є @ȁtgȳ=Y7a!=Giݖh-Í'H+8\tQ;/oWrF D<ԚI e4gNyù(-YkL ũjyq ~wOc[PMxmُLkMjQ<@HHJԡDpMg(q(NNGD0O%MZPmאD..Qu&~EmRĠI?>_n9}eZ)[AۚM\ *2u]/!Ȓ7o뇈y:i 5"fdkŌZN)і~V`+;o@j-|n˾TVP*at^?ή;OTtB}fFQ_y~zgVw%1I:RfzlA?u*V(FhS=&}V}#2^;B.^"(]Ws]j/~y>^47;4AW;8 4Y- Y*)yT9klrՆވB}zx7p\''[ xpǗ1;K<B܀4=1HKFEhXb%AՖ<~s"LCf4XPRwTdb̺`>E7eތ_\-뼃|yg5ëCs NjOSʆo.LqLrw=ҝTG5MWsIj㩅{tCkRjWM\U0vn5dՖUCfJt̾.E^帺m@$%`^[gf.^xI"40քXΥ {}H09$HT^q}ŝ hʹ`L{M؁}JfD6+V3慡 gΒCޫ~{P~]ħ=s;a/M‰MǗgp=^B_ jb幢vV=Up%_J-ZAҐ11$S s2s2s2s̭$cyq) *R1$ r@`($>(8@DDN^.! :..AQ̈#)ؐ!P"5F-WDh%|b NGbDŎ+d+f V 8<]DwvA/IWGkcq l b tXPo& ɒÉٔg X3F0Q";#EA^qsT'N!X-(>LeUս]͞iYO|2xEx^Mŧx|_xOd dw?|~Y{`:ۧ8/7w9*+ U0#˷Ҟ%'~xdZ w?߹ٟw`|gwц}@,_{G"LF́\qǠPz+jD ʍjq"AE{ Pꡑ}wY;Fg״dzfFۍ+AΔ)0^< A *uZɋnwj1*H4,4S5u9A_CF9  7:у}2}?jh)o)Y޵c>Q!Ƨu] εO<$1e̛y0O׌y?[kxf-=B4װcP38 tIjs>1 :jۉLOWTW p:wX?{8 l<{`A )Ζzr#oS.y/O)Ih*\XQ*Θ=EI ETPJZŒ 0@ontTrDrV`)>:uy-!D5&BP )D!f$\E D2(fmRy3+42J7t058\<8L'Zƽc³(=U.S@txO8b눗 u!z\ 4p\bwDq08:R͚Bq1(URxQ!8P-dUZLzG4\DŀK=q+W)H>iZṔ3uJ D]Ϟ4ZիCGDZ^ᨦSiS8[K,` -ZuC ?|7v2J'}T( yvw_; ,\NLp@vf(n״ed&RptE@ܽCE@KFSSd?DHE3*4D)\`^9$(4,"F5A!T[~H#po^dZsAZyg9n#:H~̧w[)C3&ڈ%+I(~.?n͖ajk%#F5~Xo$>8bԈ 6ISa5Ɯ΋>g1N2y.Lg~֢ݡ~d4u8ZgMigR+6<:ΫheZ7HI*TX%+΀ RdQPcSUj$I8jnRe$% x^o~K0X^V_nU/*x}ӹ] trvq./ %jB&V廋8&)&f G {{ dc D *to}a?ٛy C|֊[ZäR7) r7,9D]\yOP:m[v~jDZ] `Ö9B ǫˀ#erå 2:J@mQ.=s/_>e/bER1qh{;8 |A,45.r:ŸxL{|`n)*XZ0ɚ%p4ÄQ^S(HZЯ+>uP!890,ufq׮W'sW!A]lqFuq3ofoJ0$ \7@"bNǑj*-LB)[J=$*,WHc 7*FEi*ܭ\dsu_μ< OhѵΊ<GfF褛¾ "%Ҝ _73;Qm|!R-,a=Zt]u>4C Mܒ.vjw"s:9WHuEj&JưM4&X+P4z7?5L(UFhkeX -!C(Pmtm(;7`g..ڨ}m^FA|nn-5kԾlsѪi+t\eʤ }|_U d"2$Ee h[b'Qg1@(F< R 8 *)&+?ܮl|l&w%Hfel2֍_f_%[,l8 KEVp1 v :X߅~cF@d6[(E腒p\΅BJ  =\fA;LtC K4J юliW v4>LM,2mN^csJ%fv&H0md=8x<+'A6{$Obj Sc}4wףǰr]=3(?=*Ÿ<ʄ^t u50lg31?ʈFazu1ZGѥ>q+hz=#j:*3WBv2cF\<$tះyKܱO8 RmKS=.eC;9:MO_O{'oݡ}Մ%X_,&%RUT]ͦȸ|Y3]3˰ma@ĎٛfLYܩsCՓA{ZiŜp4&EB6HM Rz@|Z*mT%T$2T:ihEI^k w=!f1pkSMYܟjb(o6&$ە4G`~3kirs^- Z1.0k1{LWՕ_X #fh i"qw&բz51AoV}g[Q>X6b,RMj-&ݕZߧgcpy!c.8Joڲy~,wcȐK*W.SR7Di0zm\My..9\@)N7*TJwQT {&=V_^NQиCGah%iD3ʸCWy^}7zYK#캐]Ժ7g4|Ҡhl;1`lzlư7.bYrsTdRaU[uFu{cL2ȰFcywhG2_I*؎02vV kY*qGO|^9̼07WxgRSbkn}S=k JN^U~a2M>_^ԵY@}Vh$da+c"lAB0/FtY&y4&<-'-f"vQȎ<%ՉX]7Ix!%JjC #o4>X29гla,̾2hv<¯RnPζpcŌamq"dO2uIWX2&isХC8ӆxP8{X>o ie8n鱡Bs:7obisep"d=UB"8 p/2f)&IʶHz=]v$'ܓ9`cdz_]`^?R~O`H=٥3H&2]M-bZCG5lNN~5ӓ=Y$Y'{1h:V['M|fw˚~|IG࿘ҏa8dTg  JQ%󟯷PUҚj| tE 5WT֡~р5h6/݃+!dysI:ƒ JoѿɶkΒM PɧU20!s Kb7LeZ1DKB@[&=<VԱh쌵y A$]M<>m9'Izs!{'hˉJ-!yBҞt٤KSns3DFo l2LKØ{>3D̈"Q`FQ*Q?JHSؠ)"h| `r_;KW=%g)<ʹ@"~G#=n,_{(sL;C.]}H3=Y P+8$,wce-'CĖb_lU=_VKF .mehWB1V5lSQ{%fsڻ OZCUO ;<!'4d2`ױ􀑏{|rlх:[s3ѽdUG)U[GU&,<7 *'Z.ـ[\2:(nmvv'hH̻ɾ-C{gfa5ȧ#.0Z3FJ$<=GiS֋>j>~6TpV)&|f}%q.1^uFd[=wc󭞭F]g\g F@Ä4Lr1$؀؄ 3w|Xla4b aQE`/ y5Xk2T#ULKiFTӖK!7VU=v:-`:X3NT%UƲ;weqğ`V #Zi\Xe,X$ VZT( ۀkt%e zjNЫ/.Nf LekGff_-(`?_BW%Oۦ - h0]䰧 2mb)S#%SOyeETq`b {Ͱ=! 
sk zzM`pc%Ayf7c`,&eM VĶJ-ۡ vj D1֖pD'-10} DNprQX!&7Eɜ*DXo]Veu={`V(fğb}QV[;'uO&$ @;gYϚBZCR2<|~)U|J)dbUx#=!a=kuɸ pV0j 7&՘'37WS ?E1{t thǮ0ϣ/kxJt:== fr}tY9rnZ2K5j@TB#' v+Z2]z_/'hL.L$5cWO,wF8wWp4b L==7xo~l>:9qg.x'^.$~A}=  (_G櫙kP2ԓFACy1%%r2z6w~r~("ms~ltp}G~zr[jF`R8shnX>AHb^3B!TĽbqܱ&vl;j Hh$ET$5ǚ 1pXLrz0vҪOV8gݫݴLZcY[>$:V},y,ت.rBm9J)Ԃ&h1Zy~Lc!;qRH҇7ƙdc/E?lUuVD8)R"N\+Goܝi<ŨIZX#(IֈucM m^VFa e!p !mE )Ĝs'2sю"1s.#0*&װ1:T O_q)䒞 N>VJđ| *) b+'d1#=X" XHK 68$;&(T R=6߼I0sb9X  )ᵞ!"-PLf [k=suܖ \.9\LB f;[~g.d ~qrT;iVѢ@q9"*qD$ÃST,J5A˰A _@5}=jA8iV,6$JJ&=?&48SDQ8gG4>0UB)#^)7*ѴZ`C5֘Y? IhD53%@yk.I#ؕqIZ{ Ǿjܛ(=;e'Aِ[H5,nКOuК )R>;D|D5+t M.fj!J$_} K2-$OA(MٻnWXJvqy)WR>$..3L4f=IIG qnd71Ğ޼f߼\`;cXT>kB^ؔQMuaǻ)V4w+ tJh݆KB7V\ֆp-)iFe4U*лbc:m4nnCNkH]{z6,䅛hM 9]&(bR rL&mj%yz6,䅛6eЧ&7JUt{<i!\(Y#uwL? [1??.ןrQH BW! أZB@-oΦ|Sjqb~[$& Wb #3f镐9J*1Jz4Wz"/_ L 8tDYjO$DGy`1x^RJS=n">ʌӸ@}h\x+8<>|{q\;lD r?(OWpr޹8t9iG|X)3gr >"[K &ŧ#tgs nA(4 3QŒTrGEl+6Ó?uPkAqF;0γS1-1O}mtaimz7>\/g>D{w~uljXݖ<3_0*u?fXe'".2UtOɯj)>-w>xuOI5}ŻuBb;<|Va Y?ߧ\˔RpR .)7`Hx\Yb*Mz, uGw׶ WaNfכĉc+'L2t\ :G**ŵǾ#c49,0*bȢJᦊ.-8zF]={cp)4>)Y+ǧBצk. z?!:]`pŊqrp FJM=ȅG,Ta U(9`łPnZRI\^pchF (by\gOL& o9/^u'GP*+DVZKs2A T81lNe*E# *#~K5\ 5<=p%yE>0{ͬTp'\ou.*tvr3{a'~&ӭ]~z;{5n~"ڬB~o7mt^{Eo3mo漣n%F0A>^~ vuVscV6>,i qcV]|DKgWM1l(Ms;Bl Mn SH?ա,Ħ&zfԦ&zÇj?6}C37Ж!<;m=pDzlzmQV 9 lӽkqm4ѨdDO2OR1(sԁ*$ub(iԔ2d^eZeא+Fj7W&Jied9E43$AR\!*>h'mZ"chOJ?.Pq:( ys BuEoA/>Fty$@(hS 6?kAѤeT_-#LA0gl)Ċi )XGk)]+^5Z&uYp)fU*`qhW$N4$ n "J 3jFDWfdd.=+*.9"2RhB"VV M-|Pp$$"Qٽ C6FW9rpZ 턤H'Ϸl&NE' $ﮮf}g8=|{0Q8hfre64Gާ/+RH"ӽffrAk|Wؾzvއi.ݧ_zラjN[#ÿ g*t,jtJ1B`UU !|ASǘ!^$;gBiFyeT\  IRUG~s0(CZ Xp6-B Dg- Z*3|&5boR޿i8n fhҽ>~' ݆5+1id N650iSB=&u^pל(;K\ҦN6)r\?]'(worJ}{֦;p:b^a pEӑ SE۴Okz\g99hk%ڳ_|B3ٌV6)|J\tSZ1e8rD!;g<'}qD5`^@Hd2lR:$?J+ g߀ `׽0n' DѮNgnu*_?xX>ZEܬ V0Wʇo}Y>~Gd"KǻϞjEis~Ee0zhcE$(JV9C#srؖ[A. W-5}\2M[k {Ԇ_i=[3?1,e@1 ÖԒy䀥K1ȩC[FR:/iRqf6,䅛6V3BοFO! ڸO!yG-4& fsjޔ#[mg=˩%Ne ^RktDu^xj ᚂ4E/mŜwb%~!W\vWF>vedHoU*B靧Ly!~+ȞOofmp0&' D)#"&*$>.GmIjb9%?]%= :<+ i 7'K]U߹ALX0&,8Vnxǀ!nv>W4؟}^$dnng+2щY.v56$\f.:k.>&. EV7m_}hE-E&@C[ܨ}uҁGwbMշz:pDN"~yڬލ |<ʞ^ۭ$ JCcf]=^k{"ڙC,Q; bΩnx ۽ R rR 9ynBL($RI!C8 zޓ7cP?.N-1ᖘ1FW:m!0c@W[]9^Gb@**Ӈ=|.hb՗}% {WӍN:pI{kTP"RAn'5Th!4kT!ѐLVMNʑgC&t=A {#s8<9|`4L> $պNY6=u!"dNGh%/3X'ZHCX鰤T|u2qXDRsÚ,8FO BTIiC@ E6#!؟X+!rd39|4 Z ;|?w9R-.m7kpm}M`D49N "-ஹ6L! 
/^ =zb$%!zi@tf} 셆l|-{x|jH|A} kǕ QF+Ջ' .FhC3Uc/3q%ѷ/ap=r_5R˛e"*onBSygHJmF[;‘>SLX*jk#ZFiF(~M^P $AT8(%=d@|3H|AxJ[4(T.X36\I+'A# q$N P9UA@| TꎁmPw SKجd³޴{q2IQE8OfQe=[s2Zm,#,WJm-B *f13АX,X,1Jծ?=CYƆ@(o/7BRz4 "B )TS@ ŗdT j)Jy+.aQsAO71KOW[QNp_KDnAdfIb+M-D T$~Ukm0H)8Tz!eG$qDu:v`ٕKcM:+t:J (>͑1 7|J,wWܭ*ÿMO*W*տئhD /֌?Q]܄X|DbTdelXlڦˡWIE|={j1+kC)Hd(U/0?l'j4[jTnu"T/467:@.1!?1e#q}ow4>x| ېSWݾL c:pυQ$ZFkX5GMWbIlKm!L1*>\[WłC.F?\b% RMڨσi[jcnm =BO"ITFDΈ )< zݧ BUǴsy&%ָ-*IOb\`8 "a+-:,J, Ǝ *Ӈ2AA\2tfzL ظ?.՝c0Fvxm_Օ<3ls?iW|L=_\_xI zŀjh/z 8];>t/5JDGPR}NgM9T]_Ôhl/{OKRL{R묟n9XQك D#µݱ1U""dρ8zo~TNXC?(/fYl~tW;GERIe5JLש̋Ǥ ٧ӥ $Xp= é@D""8K#$,fK8x/x_o0aE>rA ],F9e ?רb )|(4/գ=_\N@z^vjҲ~i/{k1B>YʒA1W Jƫby].w͇e.aˉwrU/0$tFd>Zc0HòT7z쥇E&WI]B }k1 c.= M ,StNg(/wf!h* SS 3]tB98-qIҙ^Ѿ 1pKΐ(LWN2J([tq_"D%Ib!._/EBF T.LAtF:%-} U͛37GY:&suARL$'R}S)`q빿E0-{5V2H),K J# He@`TO4zSrJ(cPX€i_FůcP7:s7wX/O*P_lӔM_)PPoYi(F9uƴz׵U{y+IJ‹F5>M#2?w.X{LPW.V>׉;ۈv⻞$triy+|E>;s:u*Rhn !,QGJST8F.{N˃chYEȖw@ohs@_-!Bi>;*B*&= En`8NJH[2<\k7VǎFe%ٓ:l4kPxU~Hs;U 4TS7g6woA;$'wqW(`wWpXվ@r K+$v`vV4dXgN{"8^9m/# Ce,u3oq!/bUkiVuK)hzm5[?駿wDU<,j+-x|>ތv~<wG$GLmkY5^X:"nmm6pP`9D^#qSG(h;>e6W?dlS7Y2̙X8MA,yz0BRTaeyLU39 dMy ww}nUI`>}ϊU1O7”bؾW$ѱHүbsKs+|j/HgkK17d$dit ɦL-lWmPH^vP(w*zŘgJa PT sh![#E^M+MP 50ے&8iow{m˴ToyOױZ~|Mqx(]?,e͞Ԥ]gˤ26FOFogV|:xY~|>Pooӟٻ6nW}طzb/kENŦ0xM-UA{8,#.#GAH#sHsHo߼>|{wa:>uϘ"7=0b@n>^|9m_ITwcڗ7 _{F"L _?)$ή:Gpݚ}2_ۼsݞy9'@s!(Db儍3(!-`ؽU: /\AץJ2}hCQi0܇orG.3nLľk0= 9xÕH'q09OOT_Nb:%?%:9i;uFބe*K(#Hx>oK%vsKD *phdZ&"l/F֭6^ߡF`l?}uY/4pljtnw!W Pҏg0 ~dLg?~w|@J}廟E{P.(znxt38gy <gabRwގ_Mx+6A'Wzniq"ꑋh_)E.YJU& )cTK-߷1is2|+h:)#gY?(w4I8(BQxb&CĒw"pS[ tJ}EޞLTFGX+R 2+bFkF-Dj ,sh g:%2ϪHe+BZ j q@3JcƄA̼B!sn WGPťD3jTSxGj^<T*I5 ^x0WGx%kK.U樂T2@?BNtY|eD5[_o%gY(մg,u0,Ap>-KAWndCM6TKcz)OR%ښԎec ug͵7u>P`WV1WVB/ex@znvr~*\cvPkL֟+Y}pD\ʕmIP0] O*:*n>[oҕTsjiAαirɰzFsyhv7l$aw+aslmM%5m,<tX )0NOM+w߽:=V#| WÛ4>nʥ5|(=Y?nf"0ڠgH q"hkVmhVbuHcC KlIiwJKDaUVDi7tM [u΃?},CߴFs_Vu7IqA&w?|jF:%zϣ JPttrl"92PpRb@I5[ h}vL@25.}joKae5; (Vyv:e!0srjԅ 5otLWucSF_޽[~"Rzh6*&>x~*nZ̢b<_ߢƦ5ZEBI/-iMIsU܊Jx(b$e`!Ү6F !:01 & +/NQ12MaRa\ȴ'$ !1Csb4GN*gI1ʂ>h%~_\B*PπGS1Y*OYepzղ|T>*qU>yyMJ~ߐ;Ai@]uȂa41#Vw?Chte=yc[nr&I)!2( P xGޞa WaPBaæu/}Z]$7ūp_%Ō%*.% %۔/ Tk2 N.jr6C+J+]k /M5ƛ[$xeqppmSެOX)OtH"v7P7r Gcnf_t#^M3[٭攨wwIV;ɑ=K+-rqH].ٴ]%Q"NC J\\"b mv}.ÿ>V\=dU A%J"lQŹ7džKJ_ )u6/|aJ(^^r^a6\VOcNvd_z*yc}湑ɐYF_$RYBj牪 :UsR>=, YJӳMO t1wjT$Jł:!lWj𿕪z6gNޕʖ)D=9 ̝y>JKnRX3B9y&֖)CS>e,ٵ.fm*Z*.و$(:'pL*"՚8`JW#s?Z޾eơe[`x-,9٣V3cy mQ 좀ǑH(T?.tsP{2Yra#[,}t&Vn1Q}݊LS'TG~Y^lU-p.SO4ʽf' E>[V@yުśŗnQĹRxɈχ)dws'B%#e~I/Yva,9.LPJ2/cɱi6/"bE?!OjjF?~mNjc ?&JF yNxbc@Rp1Hz"„H%;]k:|38E1Ure.}u+H\ Sdw su kN~~=1Uarп7~g*N}39I%j- eI&R-j/ہ$1_X_hE@S;?~뷻 0*Wk s2A4d`c"GDQ0n *TTjc jT}Qhb 5m\0gc!^3}/ތ-n ayÌ'Ii$esЀ* Q-3vn*;oiINu>s׈ %Ʋgf*q{f~߀I%B+~OGpRy8rYLoEHpR 򚨚=_*1JKR|tkJxk-`:ʂbmEТn6TMDTj\g2QM3(%CsvW<MQYngc eDgi:8Q)b)1,ZLq Hi+J# .⸌NC*s ׂ8}mw3w=CjF !+Pڅ cΥrXH3`LkR`6󨣃_,Hꐓ 'V+d5$=!ɴԜ \|A.;E~8|+ecLه8I|2 tm!%u{#xBVhB Z#t.Q'[zktݿi:uԉwWVSyŜbF|!,($,MWW^疭!2O ^}@W:*Wg):KYʯΊS#\ȤBD X)$TSq{KmDQra  aTU+N>=|F)csz,Y3 ʤUsI2jXb@JfZ!y! 
XpMT#f%+P ل`1(g6caK7hnns8OnpQJդ 99),9V~{ӾL&ADdgb'?o`U;xЄ_qm qlUWWP> 8C\hqyO,|)ג* QU 3t%}ӭ5N  62, ',- |Z& $JO's*I4Hj^c%c/&"q"GJG(cJ` u&Mir`N C`AKB_nUH%c1P : Ԡ'pjyt<«dqR*%J% mZ6` ZgNFέ:hG<(.\Fkڕ;}В%ђ/tRxf1 D]{R@  ʃ7:cyr-U-.K+9'jR ul֏ÿ{By3DٿIhRlvLӦЀ״f>*D8 &g`j!0oFGO)~;Ĝ`tjO&jk ",ٻ6ndWXzٓ'vN'o0↢7?!% ùpxD'D鯻BR LgfnWhNSX \J7nF^IOל~Z_/<8=Mtxߌ3oW>h%l5[ϵBg&>7P*N8N?KKHGU㰐TYn})[򞈌}s lYP:PS>˒A.U6lإHLBX }dnn[ƥtI n[\V{ Ls=PJҥ2m[v?h@mQ Mid֥?I8YWOк% a>M%ufwIJY l7/qH:͊_*.*s٬fI4*jܲ\!5ptr_՜ l0 $]|(Ej_ $:)Mq+[ ݡ_|Cײy*8m[ˮ$KV\vñh F⡱$h Sk ^`/BP?4IV77(LVACA;ʶޏۊPuc)Lu!*O "#igLm(C( sacOSjR!Rff YWHTU1SF *`#ʜU[,3RFT rSǨhYcpEh ل[ۡQGҨLjhqr5!44x' bp g3%0wNAba_#  4y |ےYG1DAtyA9 FX'!}0ao? Ō2^ .0fd||*0K,Q. kѱKO-42%E1xtR##keՊBsS4^Am,FC^1k1Byu~fwo9𳽻i^Ԙ^&x|vX`kp5^ݝ_x:cZ( #e.,+a~~~̝4Resاܭٯ P$䅋hˆ ~ߵn:5A4u;;s]Z/kڐ. d laf]IĜ[T^ |pP7fL֬3&7Cd7=pՔuY\~"[١lݻuZ7[QLc,Xey9p5m9QݍSq({%Bnp7w_g?p/{F m8ywߌGftT1};<=S•֣ldD%ҴsD[5+1|w&~wm'ŘiUkQۙB`W[%ue. 6._au3&P10ˀ:2yS;0#O"mo_v=JC fRa~ǽ# (ks-'v-p-("V :[<;xsvtmFd,8=4{2Pr Ƿ5܆4Ax8!BKAwA嘄6"ũ&!T 4 XiJ "6-[qɲFiVe hQT:k ,I}޿;LQ<c7ƒҥ•qLȘ1T#9xU<N1,o*KXtGBXi} 1'sdtI a _lo<=GJ0ZO?{e+gd[*Ƃ! z?G]1gs{v@Ơ6,QNyӥzRR/WW1^M6,f#@WСۅ_dk؇o Oh_oQc,u(η)XXS߮T>XNK41xq\?6[7QFh)J+~\Vfޝa v΃WYgX]KhxNלhU ׶h 5eՙ>K@G@8h=f&b=?))FgÓz0G\`rκ#fSLR KI,!ɥ7IIRz Np?x@JIzA =C$N+-s#\+sy!WD[[YcF1{,V8,PS^hxi횼r 'H5ִz+-Zai-p; R[#4:e41hFދ)_8q)˗_w1Tp3F)Z,XFß1@v+JTk#ZCd+tp[UaLz0[aLTmɝ \r%Ovq~؍$b&ӛcƸ1b&9m]g@T{e%f-%RډCòIVrv\ <ݼZ 1aK Ff0,ϳ[?[K>9?=xM0$G>V-@Q!pKz eXà/ [`~KTS ]89&d8*n=*D[B"~'0Yak&'ń䤖J«Bz+~F˘Z3aV)ps$jʔ3a4_mSMLO$\L !ǪTV^Eqj[5>U":bW:B%"_ (?OXe*gl E:vN5) Yorp,!̺i&|Vn+lBFIpܬLnAv j@q3TV,P(u5JJE; ҏc- ڹrkW8s6w[^.Q Pi=JP #N Mmb*ܨ WXc^[P-B n.,g+DvdLtXX-#a1#26M5(a NOѷ)bsTCӪp pRpOw/+]#+%Fw*8[9C8(h.pr~fM*wZ,R]sғbшΑߧӉ)|1tDYݒWw, 9&d*S~2s6M-iW %ņ \FÕV јh9З@胞o p⃉}Y%uH<rp k>xJՖt@ȡel "CEfd@K !!27 !F Pż%#y>5輴Mf9g?z0x/tXOUix(,`˰zn&3U}tףyG`g*sINV7k<ڥ5ui{O!4S,x0u;ϯfg߸8(4xn6G~)p}0›5U );WG?N)~+pFx>OຼOq;Buo;S\}7M7߹ oꐄ?cIX@xZ;}cɠ SAdwGF rIʏ+,Fz.֊)ؓԧy3 &M-S20 V0j נ.X-z _;`LShd>D<8rAz|OWAtukMLH(;.&X-ye M9lo|\9t\J?Is̮s =wZ-ㅵA̰P`3 O Ne3œ'LR:WkfXrͤ\&es- &gIy~G%ttWqyf܄vU RR1wl⇫83191Dcb8 :W' w6AP1 UQ7] .J. Amlw";,QXyx|ϋ-.PkoWki{\Vq*n*{vJB*$>KUbI3sld\g>dVmEJAX)Ga-Ռ夃PR/mb;fpD4iӪ(T;8xSMUjA)TzO§L#.S.Hj'v4}QĎ/X""q,BkO;FmD͠:V\CX 5ç0>l*֮,-T! Q?^˳> H$ا1Z'`ƈ2ZfB x6DF  AئFiew)cuZ=S6~>)j,$I63..F1YZ8-K͋*2k 2Bq"2 dMbcˍ`h`>icR* 2 Aea u<$DsX9Fa1Io#VoGdD@I]gM- Nh{.;p'r.G:I%^D5)#!ˈ"Hg=RbUdi8BbZl@(!DZߌXZVA"1bgZG5|i&ǘcMDbD!@70EL7'BY*^Xr^0ZmQ?bͲ h)h.ZS@#J1n 3BJ6efQ[@JI,oJ5$WLbRaG|məSГ`*J(95ԹP[RզIxfSwjw|d|690&ơbxR`+g`Ki⨡>HIi3,uyNHnNW-A٥[)@ׇ9g krE14! 
Jan 23 10:48:54 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 23 10:48:54 crc restorecon[4679]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23
10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc 
restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 
crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 
crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 10:48:54 crc 
restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 23 10:48:54 crc restorecon[4679]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:54 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 
10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 
10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc 
restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 10:48:55 crc restorecon[4679]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
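Every "not reset as customized by admin" entry in the restorecon pass above has the same shape: a path under /var/lib/kubelet, then the SELinux context the file kept. The trailing MCS category pair (s0:c7,c13 for one pod, s0:c682,c947 for another) is what keeps container_file_t content isolated between pods, and restorecon leaves such admin-customized labels in place rather than resetting them. A minimal Go sketch follows — hypothetical tooling for illustration, not part of kubelet or restorecon — that tallies these entries per context when fed journal output on stdin:

// tally_restorecon.go — a sketch, assuming the entry format shown above.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches "<path> not reset as customized by admin to <context>".
	// Paths and contexts contain no spaces, so \S+ is sufficient.
	re := regexp.MustCompile(`restorecon\[\d+\]: (\S+) not reset as customized by admin to (\S+)`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journal lines can be long
	for sc.Scan() {
		// A wrapped physical line may carry several entries, so scan all matches.
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[2]]++
		}
	}
	for ctx, n := range counts {
		fmt.Printf("%6d  %s\n", n, ctx)
	}
}

Fed a log like this one, it prints one bucket per context, making it easy to see which pod-level category pair accounts for most of the preserved labels.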
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 23 10:48:55 crc kubenswrapper[4689]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.500728 4689 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503558 4689 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503577 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503582 4689 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503587 4689 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503590 4689 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503594 4689 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503598 4689 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503602 4689 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503612 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503617 4689 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503623 4689 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503629 4689 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503634 4689 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503640 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503645 4689 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503651 4689 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503656 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503661 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503666 4689 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503670 4689 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503674 4689 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503677 4689 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503681 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503685 4689 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503690 4689 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503695 4689 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503701 4689 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503708 4689 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503713 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503719 4689 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503724 4689 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503729 4689 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503734 4689 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503742 4689 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503747 4689 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503752 4689 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503757 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503763 4689 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503768 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503773 4689 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503778 4689 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503783 4689 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503788 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503793 4689 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503799 4689 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503804 4689 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503808 4689 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503813 4689 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503817 4689 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503823 4689 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503828 4689 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503832 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503837 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503841 4689 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503846 4689 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503850 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503855 4689 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503859 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503866 4689 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503870 4689 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503874 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503879 4689 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503883 4689 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503887 4689 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503892 4689 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503897 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503903 4689 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
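The feature_gate.go:330 warnings above come from gate validation: gate names supplied through configuration that this kubelet binary was not compiled with are logged and skipped rather than treated as fatal. A minimal Go sketch of that check, assuming an illustrative known-gates set (this is not the actual k8s.io/component-base implementation):

```go
package main

import "log"

// knownGates is a hypothetical subset of the gates compiled into the binary.
var knownGates = map[string]bool{
	"CloudDualStackNodeIPs": true,
	"KMSv1":                 true,
}

// applyGates keeps only recognized gates, warning on the rest,
// mirroring the "unrecognized feature gate" lines in the log.
func applyGates(requested map[string]bool) map[string]bool {
	effective := map[string]bool{}
	for name, enabled := range requested {
		if _, ok := knownGates[name]; !ok {
			log.Printf("unrecognized feature gate: %s", name)
			continue // unknown gates are skipped, not fatal
		}
		effective[name] = enabled
	}
	return effective
}

func main() {
	applyGates(map[string]bool{"KMSv1": true, "InsightsConfig": true})
}
```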
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503909 4689 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503914 4689 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503919 4689 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.503924 4689 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504184 4689 flags.go:64] FLAG: --address="0.0.0.0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504199 4689 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504207 4689 flags.go:64] FLAG: --anonymous-auth="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504214 4689 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504222 4689 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504227 4689 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504233 4689 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504239 4689 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504243 4689 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504249 4689 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504255 4689 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504261 4689 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504265 4689 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504269 4689 flags.go:64] FLAG: --cgroup-root=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504273 4689 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504278 4689 flags.go:64] FLAG: --client-ca-file=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504282 4689 flags.go:64] FLAG: --cloud-config=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504286 4689 flags.go:64] FLAG: --cloud-provider=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504290 4689 flags.go:64] FLAG: --cluster-dns="[]"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504296 4689 flags.go:64] FLAG: --cluster-domain=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504300 4689 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504304 4689 flags.go:64] FLAG: --config-dir=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504308 4689 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504313 4689 flags.go:64] FLAG: --container-log-max-files="5"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504320 4689 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504325 4689 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504329 4689 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504334 4689 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504339 4689 flags.go:64] FLAG: --contention-profiling="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504343 4689 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504348 4689 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504352 4689 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504356 4689 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504362 4689 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504366 4689 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504371 4689 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504375 4689 flags.go:64] FLAG: --enable-load-reader="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504379 4689 flags.go:64] FLAG: --enable-server="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504384 4689 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504389 4689 flags.go:64] FLAG: --event-burst="100"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504393 4689 flags.go:64] FLAG: --event-qps="50"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504398 4689 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504402 4689 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504406 4689 flags.go:64] FLAG: --eviction-hard=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504411 4689 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504416 4689 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504420 4689 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504424 4689 flags.go:64] FLAG: --eviction-soft=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504428 4689 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504432 4689 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504436 4689 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504441 4689 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504444 4689 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504448 4689 flags.go:64] FLAG: --fail-swap-on="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504452 4689 flags.go:64] FLAG: --feature-gates=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504458 4689 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504462 4689 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504466 4689 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504470 4689 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504476 4689 flags.go:64] FLAG: --healthz-port="10248"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504480 4689 flags.go:64] FLAG: --help="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504484 4689 flags.go:64] FLAG: --hostname-override=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504488 4689 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504493 4689 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504498 4689 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504503 4689 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504508 4689 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504513 4689 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504517 4689 flags.go:64] FLAG: --image-service-endpoint=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504522 4689 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504526 4689 flags.go:64] FLAG: --kube-api-burst="100"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504531 4689 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504536 4689 flags.go:64] FLAG: --kube-api-qps="50"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504541 4689 flags.go:64] FLAG: --kube-reserved=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504545 4689 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504550 4689 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504555 4689 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504559 4689 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504563 4689 flags.go:64] FLAG: --lock-file=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504567 4689 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504571 4689 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504576 4689 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504582 4689 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504586 4689 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504590 4689 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504594 4689 flags.go:64] FLAG: --logging-format="text"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504598 4689 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504603 4689 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504607 4689 flags.go:64] FLAG: --manifest-url=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504611 4689 flags.go:64] FLAG: --manifest-url-header=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504618 4689 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504623 4689 flags.go:64] FLAG: --max-open-files="1000000"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504629 4689 flags.go:64] FLAG: --max-pods="110"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504633 4689 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504637 4689 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504641 4689 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504647 4689 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504652 4689 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504656 4689 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504661 4689 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504673 4689 flags.go:64] FLAG: --node-status-max-images="50"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504677 4689 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504682 4689 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504686 4689 flags.go:64] FLAG: --pod-cidr=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504690 4689 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504698 4689 flags.go:64] FLAG: --pod-manifest-path=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504702 4689 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504706 4689 flags.go:64] FLAG: --pods-per-core="0"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504711 4689 flags.go:64] FLAG: --port="10250"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504715 4689 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504719 4689 flags.go:64] FLAG: --provider-id=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504723 4689 flags.go:64] FLAG: --qos-reserved=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504727 4689 flags.go:64] FLAG: --read-only-port="10255"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504731 4689 flags.go:64] FLAG: --register-node="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504736 4689 flags.go:64] FLAG: --register-schedulable="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504739 4689 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504747 4689 flags.go:64] FLAG: --registry-burst="10"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504757 4689 flags.go:64] FLAG: --registry-qps="5"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504761 4689 flags.go:64] FLAG: --reserved-cpus=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504765 4689 flags.go:64] FLAG: --reserved-memory=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504775 4689 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504779 4689 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504784 4689 flags.go:64] FLAG: --rotate-certificates="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504788 4689 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504794 4689 flags.go:64] FLAG: --runonce="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504799 4689 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504804 4689 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504809 4689 flags.go:64] FLAG: --seccomp-default="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504814 4689 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504819 4689 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504825 4689 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504830 4689 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504835 4689 flags.go:64] FLAG: --storage-driver-password="root"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504840 4689 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504846 4689 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504852 4689 flags.go:64] FLAG: --storage-driver-user="root"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504857 4689 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504862 4689 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504867 4689 flags.go:64] FLAG: --system-cgroups=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504871 4689 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504879 4689 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504883 4689 flags.go:64] FLAG: --tls-cert-file=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504887 4689 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504893 4689 flags.go:64] FLAG: --tls-min-version=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504897 4689 flags.go:64] FLAG: --tls-private-key-file=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504901 4689 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504905 4689 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504909 4689 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504914 4689 flags.go:64] FLAG: --v="2"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504919 4689 flags.go:64] FLAG: --version="false"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504925 4689 flags.go:64] FLAG: --vmodule=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504931 4689 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.504935 4689 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505031 4689 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505036 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505040 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505044 4689 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505048 4689 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505052 4689 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505055 4689 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505059 4689 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505062 4689 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505066 4689 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505069 4689 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505073 4689 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505076 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505080 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505083 4689 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505087 4689 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505090 4689 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505094 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505097 4689 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505101 4689 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505105 4689 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505108 4689 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505112 4689 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505116 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505120 4689 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505125 4689 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505131 4689 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505135 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505138 4689 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505142 4689 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505164 4689 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505169 4689 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505173 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505177 4689 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505181 4689 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505189 4689 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505193 4689 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505197 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505200 4689 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505204 4689 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505207 4689 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505212 4689 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
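The flags.go:64 FLAG lines above are a startup dump of every registered command-line flag with its effective value. A minimal sketch of producing such a dump with Go's standard flag package (the kubelet itself uses a pflag-based FlagSet from k8s.io/component-base; the flag names registered here are illustrative):

```go
package main

import (
	"flag"
	"log"
)

func main() {
	// Register a couple of example flags with defaults.
	flag.String("node-ip", "", "IP address of the node")
	flag.Int("max-pods", 110, "maximum number of pods per node")
	flag.Parse()

	// Walk every registered flag and log its name and current value,
	// mirroring the FLAG: --name="value" lines above.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
	})
}
```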
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505216 4689 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505221 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505225 4689 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505228 4689 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505233 4689 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505236 4689 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505240 4689 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505243 4689 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505247 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505250 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505254 4689 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505257 4689 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505261 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505267 4689 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505270 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505274 4689 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505277 4689 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505281 4689 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505284 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505288 4689 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505291 4689 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505295 4689 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505299 4689 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505302 4689 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505306 4689 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505311 4689 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505314 4689 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505317 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.505321 4689 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.505328 4689 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.515269 4689 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.515351 4689 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515759 4689 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515777 4689 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515781 4689 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515786 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515789 4689 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515793 4689 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515797 4689 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515800 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515804 4689 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515808 4689 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515811 4689 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515815 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515819 4689 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515823 4689 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515827 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515833 4689 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515840 4689 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515844 4689 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515847 4689 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515852 4689 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515858 4689 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515862 4689 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515866 4689 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515870 4689 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515874 4689 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515879 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515883 4689 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515887 4689 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515890 4689 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515894 4689 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515898 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515901 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515905 4689 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515910 4689 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515914 4689 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515918 4689 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515921 4689 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515925 4689 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515928 4689 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515932 4689 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515936 4689 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515939 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515943 4689 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515946 4689 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515950 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515954 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515957 4689 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515961 4689 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515964 4689 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515969 4689 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515973 4689 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515977 4689 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515980 4689 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515984 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515988 4689 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515992 4689 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.515997 4689 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516001 4689 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516005 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516010 4689 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516014 4689 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516017 4689 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516021 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516024 4689 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516029 4689 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516033 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516037 4689 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516041 4689 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516044 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516048 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516052 4689 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.516058 4689 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516232 4689 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516246 4689 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516251 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516257 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516262 4689 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516267 4689 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516270 4689 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516275 4689 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516280 4689 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516284 4689 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516287 4689 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516292 4689 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516295 4689 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516300 4689 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516303 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516307 4689 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516310 4689 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516313 4689 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516317 4689 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516321 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516324 4689 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516328 4689 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516332 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516335 4689 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516339 4689 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516344 4689 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 23 10:48:55 crc 
kubenswrapper[4689]: W0123 10:48:55.516347 4689 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516352 4689 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516355 4689 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516359 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516362 4689 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516366 4689 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516370 4689 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516373 4689 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516377 4689 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516380 4689 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516384 4689 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516388 4689 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516392 4689 feature_gate.go:330] unrecognized feature gate: Example Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516396 4689 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516399 4689 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516403 4689 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516406 4689 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516410 4689 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516414 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516417 4689 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516421 4689 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516424 4689 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516428 4689 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516431 4689 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516435 4689 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516438 4689 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516442 4689 
feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516446 4689 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516451 4689 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516456 4689 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516459 4689 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516463 4689 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516469 4689 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516473 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516477 4689 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516480 4689 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516485 4689 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516489 4689 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516493 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516497 4689 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516501 4689 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516505 4689 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516508 4689 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516512 4689 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.516515 4689 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.516521 4689 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.516687 4689 server.go:940] "Client rotation is on, will bootstrap in background" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.518821 4689 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 23 10:48:55 crc 
kubenswrapper[4689]: I0123 10:48:55.518905 4689 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.519620 4689 server.go:997] "Starting client certificate rotation" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.519638 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.520040 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-21 05:21:08.0462481 +0000 UTC Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.520203 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.525022 4689 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.526885 4689 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.527339 4689 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.535359 4689 log.go:25] "Validated CRI v1 runtime API" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.548099 4689 log.go:25] "Validated CRI v1 image API" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.549595 4689 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.551839 4689 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-23-10-44-25-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.551880 4689 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.567577 4689 manager.go:217] Machine: {Timestamp:2026-01-23 10:48:55.566381397 +0000 UTC m=+0.191061276 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:236a3513-f97a-4901-87ca-fa776d1157c7 BootID:bd6f1f67-2691-4f88-98e8-da7f80565717 
Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:04:8b:5f Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:04:8b:5f Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:19:0d:39 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:26:56:6f Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:2f:64:e2 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:7d:d3:62 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:5e:38:30:a8:3d:fa Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:3a:ad:3e:54:1a:bf Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.567786 4689 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.567926 4689 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.568408 4689 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.568603 4689 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.568648 4689 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.568967 4689 topology_manager.go:138] "Creating topology manager with none policy"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.568986 4689 container_manager_linux.go:303] "Creating device plugin manager"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.569520 4689 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.569556 4689 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.569819 4689 state_mem.go:36] "Initialized new in-memory state store"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.570628 4689 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.571791 4689 kubelet.go:418] "Attempting to sync node with API server"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.571831 4689 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.571879 4689 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.571904 4689 kubelet.go:324] "Adding apiserver pod source"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.571929 4689 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.574033 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.574027 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.574197 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError"
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.574189 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.574400 4689 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.574929 4689 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
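[editor's note] The entries above follow the standard klog header layout (severity letter plus MMDD date, wall-clock time, PID, source file:line, message), wrapped in a journald prefix. A minimal Go sketch for splitting these fields when post-processing this artifact; the field layout is inferred from the lines above, and the helper names are illustrative, not part of the kubelet:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strings"
    )

    // klogHeader matches the header seen in the entries above:
    // severity (I/W/E/F), MMDD date, time, PID, source file:line, message.
    var klogHeader = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+) +(\d+) ([\w./-]+:\d+)\] (.*)$`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // MachineInfo entries are very long
        for sc.Scan() {
            line := sc.Text()
            // Drop the journald prefix ("Jan 23 10:48:55 crc kubenswrapper[4689]: ").
            if i := strings.Index(line, "]: "); i != -1 {
                line = line[i+3:]
            }
            if m := klogHeader.FindStringSubmatch(line); m != nil {
                fmt.Printf("%s %s %-30s %s\n", m[1], m[3], m[5], m[6])
            }
        }
    }

Lines that carry no klog header (the systemd entry below, continuation fragments) simply fall through unmatched, which is the desired behavior for a quick triage pass.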
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.576187 4689 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.576920 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.576961 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.576977 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.576992 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577016 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577030 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577044 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577068 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577084 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577098 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577137 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577182 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.577479 4689 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.578083 4689 server.go:1280] "Started kubelet"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.578793 4689 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.578793 4689 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.579231 4689 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.580327 4689 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 23 10:48:55 crc systemd[1]: Started Kubernetes Kubelet.
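[editor's note] Every failure above and below reduces to one condition: nothing is accepting connections on the apiserver endpoint yet, so each dial fails fast with "connect: connection refused" and the kubelet retries. A standalone Go probe that reproduces the same error (endpoint copied from the log; the 2-second timeout is an arbitrary choice, not a kubelet setting):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Same address the reflectors, lease controller, and event
        // recorder are dialing in the surrounding entries.
        conn, err := net.DialTimeout("tcp", "api-int.crc.testing:6443", 2*time.Second)
        if err != nil {
            fmt.Println("dial failed:", err) // e.g. connect: connection refused
            return
        }
        conn.Close()
        fmt.Println("apiserver port is reachable")
    }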
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.582609 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.582686 4689 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.583648 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 13:42:00.419491331 +0000 UTC
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.583706 4689 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.583723 4689 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.583766 4689 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.584245 4689 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.585538 4689 factory.go:55] Registering systemd factory
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.585573 4689 factory.go:221] Registration of the systemd container factory successfully
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.586213 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="200ms"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.586475 4689 factory.go:153] Registering CRI-O factory
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.586507 4689 factory.go:221] Registration of the crio container factory successfully
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.586596 4689 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.586628 4689 factory.go:103] Registering Raw factory
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.586666 4689 manager.go:1196] Started watching for new ooms in manager
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.586508 4689 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.179:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188d567fd01058fc default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 10:48:55.578040572 +0000 UTC m=+0.202720491,LastTimestamp:2026-01-23 10:48:55.578040572 +0000 UTC m=+0.202720491,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.587922 4689 manager.go:319] Starting recovery of all containers
Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.588044 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.588178 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.595822 4689 server.go:460] "Adding debug handlers to kubelet server"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598539 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598632 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598646 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598659 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598670 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598680 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598691 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598721 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598736 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598748 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598759 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598772 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598783 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598799 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598810 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598824 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598836 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598867 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598877 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598890 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598918 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598929 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598938 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598949 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598961 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598971 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598984 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.598996 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599006 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599020 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599029 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599068 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599082 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599092 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599102 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599112 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599140 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599189 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599198 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599207 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599217 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599227 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599237 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599247 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599256 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599266 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599275 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599285 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599295 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599305 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599314 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599325 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599342 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599353 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599363 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599375 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599386 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599397 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599408 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599419 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599428 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599439 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599452 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599463 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599474 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599484 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599504 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599512 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599536 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599547 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599559 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599569 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599583 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599592 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599604 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599614 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599625 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599637 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599646 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599656 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599666 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599677 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599690 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599699 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599709 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599721 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599731 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599741 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599752 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599762 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599771 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599783 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599793 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599802 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599811 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599819 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599828 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599838 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599849 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599857 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599867 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599876 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599885 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599894 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599911 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599920 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599932 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599942 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599952 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599961 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599970 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599979 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.599990 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600000 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600010 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600019 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600027 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600036 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600044 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600054 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600065 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600073 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600082 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600090 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600099 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.600108 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603647 4689 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603714 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603738 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603754 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603773 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603789 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603803 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603821 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603835 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603849 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603866 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603887 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603904 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603921 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603937 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603952 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603968 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.603984 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604017 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604034 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604047 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604062 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604077 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604094 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604108 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604126 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604143 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604195 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604209 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604223 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604239 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604255 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604274 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604290 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604309 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604324 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604338 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod=""
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604353 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604367 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604381 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604402 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604417 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604435 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604451 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604467 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604482 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604496 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604515 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604530 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604544 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604558 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604573 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604589 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604604 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604618 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604633 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604650 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604665 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604681 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604698 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604714 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604728 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604745 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604762 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604779 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604795 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604812 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604831 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604848 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604863 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604878 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604894 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604910 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604926 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604942 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604958 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604972 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.604989 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605005 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605019 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605033 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605051 4689 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605066 4689 reconstruct.go:97] "Volume reconstruction finished" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.605076 4689 reconciler.go:26] "Reconciler: start to sync state" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.610694 4689 manager.go:324] Recovery completed Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.624588 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.627416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.627459 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.627472 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.628386 4689 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.628430 4689 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.628468 4689 state_mem.go:36] "Initialized new in-memory state store" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.636953 4689 policy_none.go:49] "None policy: Start" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.637127 4689 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.638392 4689 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.638443 4689 state_mem.go:35] "Initializing new in-memory state store" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.638618 4689 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.638666 4689 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.638707 4689 kubelet.go:2335] "Starting kubelet main sync loop" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.638765 4689 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 23 10:48:55 crc kubenswrapper[4689]: W0123 10:48:55.640013 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.640119 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.683976 4689 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.700579 4689 manager.go:334] "Starting Device Plugin manager" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.700653 4689 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.700675 4689 server.go:79] "Starting device plugin registration server" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.701368 4689 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.701404 4689 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.701781 4689 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.702002 4689 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.702028 4689 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.716701 4689 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.738846 4689 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.738940 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740138 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740209 4689 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740221 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740397 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740668 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.740738 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741354 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741394 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741407 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741607 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741781 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.741861 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742348 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742376 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742388 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742529 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742539 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742597 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742811 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.742897 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743568 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743595 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743605 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743656 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743678 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743921 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.743996 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744042 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744116 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744132 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744183 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744896 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744926 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.744939 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745188 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745221 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745412 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745441 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745453 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.745998 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.746034 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.746056 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.788209 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="400ms" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.802355 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.804686 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.804741 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.804766 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.804804 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:48:55 crc kubenswrapper[4689]: E0123 10:48:55.805418 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809071 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809136 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809189 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809218 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809247 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809315 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809483 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809542 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809570 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809590 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809613 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809661 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809722 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809757 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.809809 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911034 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911088 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911114 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911133 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911167 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911185 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911212 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911233 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911255 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911291 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911308 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911330 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911335 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911365 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911403 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod 
\"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911402 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911341 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911462 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911352 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911498 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911491 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911547 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911549 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911579 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911572 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" 
(UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911573 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911608 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911786 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:55 crc kubenswrapper[4689]: I0123 10:48:55.911808 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.006250 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.009797 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.009878 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.009905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.009969 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.010979 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.077789 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.102770 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.106492 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-bf74f26e092728f6f44ee4d4dfecd950d067611093740401c59d455154b73047 WatchSource:0}: Error finding container bf74f26e092728f6f44ee4d4dfecd950d067611093740401c59d455154b73047: Status 404 returned error can't find the container with id bf74f26e092728f6f44ee4d4dfecd950d067611093740401c59d455154b73047
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.118101 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.134812 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-b36ef80f6b1c92bd573c9f9937f097fb57eb17219ce94a2c072a533a9781b9bb WatchSource:0}: Error finding container b36ef80f6b1c92bd573c9f9937f097fb57eb17219ce94a2c072a533a9781b9bb: Status 404 returned error can't find the container with id b36ef80f6b1c92bd573c9f9937f097fb57eb17219ce94a2c072a533a9781b9bb
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.136118 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.137509 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b7494c2ec53a3712f078359c4b78b86ef173dc6190f04e1191e7006781729dd0 WatchSource:0}: Error finding container b7494c2ec53a3712f078359c4b78b86ef173dc6190f04e1191e7006781729dd0: Status 404 returned error can't find the container with id b7494c2ec53a3712f078359c4b78b86ef173dc6190f04e1191e7006781729dd0
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.144294 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.151347 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-f71cb68cb41eda2688666d81bd5de717adb45bdb1c34403325f5f991983450c1 WatchSource:0}: Error finding container f71cb68cb41eda2688666d81bd5de717adb45bdb1c34403325f5f991983450c1: Status 404 returned error can't find the container with id f71cb68cb41eda2688666d81bd5de717adb45bdb1c34403325f5f991983450c1
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.173434 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-055d7c1824e6109d9815908dbea10153548048762d706aca76a4eab1de2e7a12 WatchSource:0}: Error finding container 055d7c1824e6109d9815908dbea10153548048762d706aca76a4eab1de2e7a12: Status 404 returned error can't find the container with id 055d7c1824e6109d9815908dbea10153548048762d706aca76a4eab1de2e7a12
Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.190108 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="800ms"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.411670 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.413281 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.413330 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.413347 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.413381 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.413926 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc"
Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.430217 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.430311 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.580842 4689 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.584786 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 08:15:18.638986444 +0000 UTC
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.647881 4689 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8" exitCode=0
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.647990 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.648233 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bf74f26e092728f6f44ee4d4dfecd950d067611093740401c59d455154b73047"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.648454 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.649724 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.649793 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"055d7c1824e6109d9815908dbea10153548048762d706aca76a4eab1de2e7a12"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.650322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.650367 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.650378 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.651685 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16" exitCode=0
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.651767 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.651799 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f71cb68cb41eda2688666d81bd5de717adb45bdb1c34403325f5f991983450c1"}
Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.651905 4689 kubelet_node_status.go:401] "Setting node annotation to enable
volume controller attach/detach" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.652785 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.652827 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.652840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.654702 4689 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957" exitCode=0 Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.654775 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957"} Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.654795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b7494c2ec53a3712f078359c4b78b86ef173dc6190f04e1191e7006781729dd0"} Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.654924 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.655677 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.655862 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.655892 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.655903 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.656547 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.656586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.656599 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.657350 4689 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="3e838d9c6abf98b2649f2c6aaa1597e075db1f16dfd9f5034e156489a3f5f34b" exitCode=0 Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.657389 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"3e838d9c6abf98b2649f2c6aaa1597e075db1f16dfd9f5034e156489a3f5f34b"} Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.657414 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b36ef80f6b1c92bd573c9f9937f097fb57eb17219ce94a2c072a533a9781b9bb"} Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.657503 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.658373 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.658401 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:56 crc kubenswrapper[4689]: I0123 10:48:56.658412 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.676959 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.677080 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.765969 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.766116 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Jan 23 10:48:56 crc kubenswrapper[4689]: W0123 10:48:56.970553 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.970648 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Jan 23 10:48:56 crc kubenswrapper[4689]: E0123 10:48:56.992719 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="1.6s" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.214843 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:57 crc 
kubenswrapper[4689]: I0123 10:48:57.220463 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.220524 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.220534 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.220563 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:48:57 crc kubenswrapper[4689]: E0123 10:48:57.221207 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.584955 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 07:35:32.043398139 +0000 UTC Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.601196 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.663560 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.663610 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.663626 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.663641 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.665348 4689 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6" exitCode=0 Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.665481 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.665952 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.667138 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.667209 4689 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.667223 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.669056 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"1c6a82bee52161d1d14aff3b66a62b14bd2ae0562778965468eb61880bb20758"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.669189 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.670127 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.670173 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.670185 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.672996 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.673032 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.673042 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.673142 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.674218 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.674254 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.674269 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.676577 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.676605 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.676617 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee"} Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.676678 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.677295 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.677326 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:57 crc kubenswrapper[4689]: I0123 10:48:57.677338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.321317 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.585140 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 08:02:01.837738533 +0000 UTC Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.594617 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.685661 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734"} Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.685765 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.687250 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.687308 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.687322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.692803 4689 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e" exitCode=0 Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.692954 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.692947 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e"} Jan 23 10:48:58 crc 
kubenswrapper[4689]: I0123 10:48:58.693459 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694328 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694382 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694402 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694743 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.694793 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.822061 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.823551 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.823587 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.823595 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:58 crc kubenswrapper[4689]: I0123 10:48:58.823621 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.585544 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 15:29:30.833777572 +0000 UTC Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701306 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f"} Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701390 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701390 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701390 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f"} Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701654 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0"} Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701702 4689 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.701734 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b"} Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702489 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702502 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702533 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:48:59 crc kubenswrapper[4689]: I0123 10:48:59.702566 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.367002 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.586213 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 08:30:34.873475714 +0000 UTC Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.708741 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61"} Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.708814 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.708788 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710080 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710121 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710194 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710241 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:00 crc kubenswrapper[4689]: I0123 10:49:00.710252 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:01 crc kubenswrapper[4689]: 
I0123 10:49:01.587107 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 22:42:42.253502559 +0000 UTC Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.595594 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.595858 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.637971 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.638255 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.640435 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.640619 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.640752 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.647699 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.714960 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.715048 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.715333 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.716877 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.716928 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.716927 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.716993 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.717006 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.716947 4689 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.717660 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.717709 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.717727 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:01 crc kubenswrapper[4689]: I0123 10:49:01.964373 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.340702 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.588101 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 09:24:22.79619158 +0000 UTC Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.718052 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.718451 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.720215 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.720461 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.720607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.724718 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.724783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.724812 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.741291 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.741513 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.743240 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.743303 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:02 crc kubenswrapper[4689]: I0123 10:49:02.743323 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:03 crc kubenswrapper[4689]: I0123 10:49:03.588355 4689 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 22:18:41.545109252 +0000 UTC Jan 23 10:49:04 crc kubenswrapper[4689]: I0123 10:49:04.589369 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 14:20:39.581820646 +0000 UTC Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.481470 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.481701 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.482984 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.483016 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.483028 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.590033 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 12:23:47.968898384 +0000 UTC Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.630459 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.630762 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.632550 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.632605 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:05 crc kubenswrapper[4689]: I0123 10:49:05.632619 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:05 crc kubenswrapper[4689]: E0123 10:49:05.717254 4689 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 23 10:49:06 crc kubenswrapper[4689]: I0123 10:49:06.591226 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 07:27:40.971665359 +0000 UTC Jan 23 10:49:07 crc kubenswrapper[4689]: I0123 10:49:07.581669 4689 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 23 10:49:07 crc kubenswrapper[4689]: I0123 10:49:07.592264 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 10:26:21.214716471 +0000 UTC Jan 23 10:49:07 crc kubenswrapper[4689]: E0123 10:49:07.603699 4689 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the 
control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 23 10:49:07 crc kubenswrapper[4689]: I0123 10:49:07.896746 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 23 10:49:07 crc kubenswrapper[4689]: I0123 10:49:07.896867 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 23 10:49:08 crc kubenswrapper[4689]: W0123 10:49:08.284275 4689 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 23 10:49:08 crc kubenswrapper[4689]: I0123 10:49:08.284450 4689 trace.go:236] Trace[342381848]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 10:48:58.282) (total time: 10001ms): Jan 23 10:49:08 crc kubenswrapper[4689]: Trace[342381848]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (10:49:08.284) Jan 23 10:49:08 crc kubenswrapper[4689]: Trace[342381848]: [10.00155795s] [10.00155795s] END Jan 23 10:49:08 crc kubenswrapper[4689]: E0123 10:49:08.284494 4689 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 23 10:49:08 crc kubenswrapper[4689]: I0123 10:49:08.593340 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 01:50:50.986545719 +0000 UTC Jan 23 10:49:08 crc kubenswrapper[4689]: E0123 10:49:08.594634 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Jan 23 10:49:08 crc kubenswrapper[4689]: E0123 10:49:08.824745 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Jan 23 10:49:09 crc kubenswrapper[4689]: I0123 10:49:09.067437 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 23 
10:49:09 crc kubenswrapper[4689]: I0123 10:49:09.067741 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 23 10:49:09 crc kubenswrapper[4689]: I0123 10:49:09.072686 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 23 10:49:09 crc kubenswrapper[4689]: I0123 10:49:09.072745 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 23 10:49:09 crc kubenswrapper[4689]: I0123 10:49:09.593578 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 17:41:54.713724076 +0000 UTC Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.375022 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.375343 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.376933 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.376998 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.377024 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.381998 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.594325 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 06:10:37.709317114 +0000 UTC Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.739775 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.740959 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.741016 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:10 crc kubenswrapper[4689]: I0123 10:49:10.741033 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:11 crc kubenswrapper[4689]: I0123 10:49:11.595547 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 22:04:10.101629145 +0000 UTC Jan 
23 10:49:11 crc kubenswrapper[4689]: I0123 10:49:11.595652 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 10:49:11 crc kubenswrapper[4689]: I0123 10:49:11.595745 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 10:49:11 crc kubenswrapper[4689]: I0123 10:49:11.821102 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 23 10:49:11 crc kubenswrapper[4689]: I0123 10:49:11.841077 4689 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.025870 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.028016 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.028087 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.028111 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.028196 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:49:12 crc kubenswrapper[4689]: E0123 10:49:12.034284 4689 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 23 10:49:12 crc kubenswrapper[4689]: I0123 10:49:12.596510 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 10:12:37.353521038 +0000 UTC Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.260242 4689 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.583401 4689 apiserver.go:52] "Watching apiserver" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.588497 4689 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.589010 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.589681 4689 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.589766 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.589861 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.589881 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:13 crc kubenswrapper[4689]: E0123 10:49:13.589984 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.590038 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.590453 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:13 crc kubenswrapper[4689]: E0123 10:49:13.590779 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:13 crc kubenswrapper[4689]: E0123 10:49:13.590864 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.592211 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.592249 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.592224 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.593615 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.593855 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.593907 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.594270 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.594276 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.596090 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.597512 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 04:38:35.808859561 +0000 UTC Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.632543 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.655195 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.674324 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.685929 4689 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.692515 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.707854 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.726969 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.743812 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:13 crc kubenswrapper[4689]: I0123 10:49:13.755555 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.067025 4689 trace.go:236] Trace[281193216]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 10:48:59.469) (total time: 14597ms): Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[281193216]: ---"Objects listed" error: 14597ms (10:49:14.066) Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[281193216]: [14.597050918s] [14.597050918s] END Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.067083 4689 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.068614 4689 trace.go:236] Trace[1927844762]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 10:49:00.047) (total time: 14020ms): Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[1927844762]: ---"Objects listed" error: 14020ms (10:49:14.068) Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[1927844762]: [14.020833395s] [14.020833395s] END Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.068658 4689 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.068626 4689 trace.go:236] Trace[257755069]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 10:48:59.206) (total time: 14862ms): Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[257755069]: ---"Objects listed" error: 14862ms (10:49:14.068) Jan 23 10:49:14 crc kubenswrapper[4689]: Trace[257755069]: [14.862430318s] [14.862430318s] END Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.068722 4689 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.070660 4689 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.170648 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33494->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.170722 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33494->192.168.126.11:17697: read: connection reset by peer" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.170670 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33510->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.170816 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33510->192.168.126.11:17697: read: connection reset by peer" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.170999 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171054 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171078 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171099 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171122 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171163 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171189 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171212 4689 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171233 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171631 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171138 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171765 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171899 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.171989 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172226 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172077 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.172100 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:49:14.672081627 +0000 UTC m=+19.296761486 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172321 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172307 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172377 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172414 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172438 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172364 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172464 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172495 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172523 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172502 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172546 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172574 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172601 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172602 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172606 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172629 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172628 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172701 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172738 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172771 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172804 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172836 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172866 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172895 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172938 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172977 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173005 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173039 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173068 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173094 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173127 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173189 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173250 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173274 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173303 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173329 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173359 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173388 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173416 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173482 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173509 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173533 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173558 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173580 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173603 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173650 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173674 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173697 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173724 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173745 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173768 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173798 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173827 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173854 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173879 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173905 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173929 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173953 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174014 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174045 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174075 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174103 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.172933 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173139 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173251 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174189 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174248 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174274 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174298 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174323 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174351 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174381 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174407 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174452 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174479 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174509 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174572 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174597 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174625 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174649 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174675 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174703 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174731 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174757 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174783 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174805 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174826 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174847 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174867 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174890 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174916 
4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174941 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174966 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174991 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175016 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175042 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175069 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175095 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175120 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175172 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc 
kubenswrapper[4689]: I0123 10:49:14.175196 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175248 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175268 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175290 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175311 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175332 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175355 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175377 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175398 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175421 
4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175446 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175468 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175490 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175514 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175554 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175581 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175605 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175628 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175651 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " 
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175673 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175697 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175718 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175742 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175763 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175788 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175812 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175839 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175862 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175887 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: 
\"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175911 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175933 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175956 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175982 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176006 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176031 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176054 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176079 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176101 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176126 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176305 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176334 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176358 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176382 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176405 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176435 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176458 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176481 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176504 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176529 4689 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176580 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176606 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176630 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176677 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176701 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176724 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176747 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176769 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176794 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176820 4689 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176846 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176871 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176894 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176916 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176937 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176961 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176985 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177009 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177031 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 
10:49:14.177055 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177080 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177102 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177128 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177171 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177197 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177226 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177253 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177277 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177301 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 
10:49:14.177326 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177350 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177372 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177396 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177424 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177447 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177472 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177495 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177518 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177541 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: 
\"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177563 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177590 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177613 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177668 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177706 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177735 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177764 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177792 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177823 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " 
pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177856 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177888 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177912 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177941 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177969 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177995 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178025 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178052 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178182 4689 
reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178204 4689 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178220 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178235 4689 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178249 4689 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178264 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178278 4689 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178292 4689 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178307 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178320 4689 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178336 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178351 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178986 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" 
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173328 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173401 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173562 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173612 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173669 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173796 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173790 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.173883 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174029 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174036 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174032 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174138 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174226 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174259 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174280 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174400 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174550 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174679 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174760 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179023 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.174815 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175003 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.175931 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179331 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.176524 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177349 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177521 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177547 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177695 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.177806 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178710 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178723 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178789 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.178940 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179580 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179588 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179737 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179742 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179878 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.179921 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.180951 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.181131 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.181368 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.181674 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.181730 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.181978 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.182106 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.182442 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.182768 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.182814 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.183512 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.183588 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.183675 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184055 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184339 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184272 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184376 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184466 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184488 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184715 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.184821 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.185065 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.185310 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186293 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186494 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186613 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186660 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186681 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186838 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.186888 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.187448 4689 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.187740 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.188023 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.188165 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.188116 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.188644 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189113 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189207 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189236 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189647 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189850 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.189900 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190096 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190189 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190249 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190310 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190576 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190659 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.190993 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191086 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191115 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191164 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191175 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191424 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.191736 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.193319 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.193496 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.193674 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.193927 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.194039 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.192043 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.194308 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.194548 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.195120 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.195508 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.195841 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.196191 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.196255 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.196353 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.196552 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.196733 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.197128 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.197277 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.197465 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.197589 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.197675 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:14.697652968 +0000 UTC m=+19.322332827 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.197894 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.197911 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198082 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198170 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198309 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198511 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198596 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198788 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.198881 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.199051 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.199430 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.199448 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.199568 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.199999 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.200163 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.200459 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.200601 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.200968 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.201359 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.201514 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.201794 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.201934 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202016 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202230 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202270 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202273 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202520 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.202599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.203502 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.204020 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.204459 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.204542 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.205127 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.205512 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.206292 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.206634 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.206763 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.207248 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.208212 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.208968 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.208989 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.209328 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.209442 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.209574 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.209808 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.209801 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.210034 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.210074 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.210386 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.210591 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.210757 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.210892 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:14.710853274 +0000 UTC m=+19.335533143 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.210988 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.211431 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.212930 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.213988 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.214182 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.214247 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.214267 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.214304 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.214408 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:14.71434118 +0000 UTC m=+19.339021049 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.217602 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.220230 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.223895 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.225649 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.226127 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.226116 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.226640 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.228418 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.228477 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.228722 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.229382 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.230677 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.230707 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.230721 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.231421 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:14.731355259 +0000 UTC m=+19.356035128 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.231678 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.232014 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.237063 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.238721 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.241755 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.247872 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.248074 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.248163 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.255252 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:49:14 crc kubenswrapper[4689]: W0123 10:49:14.266372 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-db9c6c31d082d561e31eebe6786573988a0d508eafbe3cc721c53c9a55ba4227 WatchSource:0}: Error finding container db9c6c31d082d561e31eebe6786573988a0d508eafbe3cc721c53c9a55ba4227: Status 404 returned error can't find the container with id db9c6c31d082d561e31eebe6786573988a0d508eafbe3cc721c53c9a55ba4227 Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.278854 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.278908 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279010 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279030 4689 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279044 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279059 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279071 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279085 4689 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279100 4689 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279113 4689 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279126 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279139 4689 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279170 4689 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279184 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279197 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279210 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279222 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279246 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279259 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279271 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279282 4689 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279293 4689 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279305 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279320 4689 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279335 4689 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279348 4689 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279361 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279372 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279371 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279384 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279397 4689 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279409 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279422 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279437 4689 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279450 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279463 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279478 4689 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279525 4689 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279540 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279555 4689 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279567 4689 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279579 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279592 4689 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279603 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279616 4689 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279628 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279641 4689 reconciler_common.go:293] "Volume detached 
for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279654 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279668 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279682 4689 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279694 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279707 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279719 4689 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279734 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279747 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279760 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279790 4689 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279805 4689 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.279819 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 
10:49:14.280237 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280310 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280326 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280339 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280352 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280372 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280383 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280394 4689 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280405 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280416 4689 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280426 4689 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280458 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280538 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 
crc kubenswrapper[4689]: I0123 10:49:14.280552 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280565 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280577 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280589 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280600 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280612 4689 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280622 4689 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280634 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280645 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280656 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280667 4689 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280679 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280690 4689 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc 
kubenswrapper[4689]: I0123 10:49:14.280702 4689 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280713 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280724 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280738 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280750 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280761 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280774 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280785 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280797 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280683 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280809 4689 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280898 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280918 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: 
\"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280929 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280940 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280971 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280982 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.280993 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281002 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281011 4689 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281021 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281051 4689 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281062 4689 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281073 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281083 4689 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281092 4689 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281123 4689 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281135 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281174 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281190 4689 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281202 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281215 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281227 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281266 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281279 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281291 4689 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281303 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281316 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281346 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281356 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281365 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281375 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281385 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281396 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281407 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281417 4689 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281427 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281437 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281450 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281463 4689 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281502 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281511 4689 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281521 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281530 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281540 4689 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281552 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281568 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281580 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281592 4689 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281852 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.281605 4689 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282069 4689 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282084 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282099 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282113 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282127 4689 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282180 4689 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282199 4689 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282214 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282232 4689 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282274 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282290 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282305 4689 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282321 4689 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282362 4689 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282377 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282392 4689 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282408 4689 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282442 4689 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282457 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282471 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282484 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282519 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282536 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282549 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282562 4689 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282574 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282611 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282624 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282638 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282651 4689 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282686 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282697 4689 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282707 4689 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282717 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282726 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282736 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282746 4689 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282755 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282765 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.282776 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.383552 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.515715 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 23 10:49:14 crc kubenswrapper[4689]: W0123 10:49:14.526678 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-58782c1950743c14a974774e2ca23243a6e4a294ded47e2ab46451fefbc44c25 WatchSource:0}: Error finding container 58782c1950743c14a974774e2ca23243a6e4a294ded47e2ab46451fefbc44c25: Status 404 returned error can't find the container with id 58782c1950743c14a974774e2ca23243a6e4a294ded47e2ab46451fefbc44c25
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.533712 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.599009 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 15:35:22.175558368 +0000 UTC
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.617349 4689 csr.go:261] certificate signing request csr-ksrff is approved, waiting to be issued
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.639050 4689 csr.go:257] certificate signing request csr-ksrff is issued
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.639299 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.639471 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.685810 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.686029 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:49:15.686002479 +0000 UTC m=+20.310682338 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.751630 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"db592ce5e6a26783c9199a46fc6a68904f517c99644ca9cfb506a96973ddfdf4"}
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.753020 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"58782c1950743c14a974774e2ca23243a6e4a294ded47e2ab46451fefbc44c25"}
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.755239 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998"}
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.755309 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900"}
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.755321 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"db9c6c31d082d561e31eebe6786573988a0d508eafbe3cc721c53c9a55ba4227"}
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.756398 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.758264 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734" exitCode=255
Jan 23 10:49:14 crc 
kubenswrapper[4689]: I0123 10:49:14.758308 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734"} Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.766662 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.776744 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.785726 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.786516 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.786687 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.786731 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.786758 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.786799 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.788873 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.788894 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.786849 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.786920 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.788978 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:15.78896153 +0000 UTC m=+20.413641389 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.786947 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.789074 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.789094 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.789172 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:15.789118234 +0000 UTC m=+20.413798093 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.789198 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:15.789188436 +0000 UTC m=+20.413868295 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:14 crc kubenswrapper[4689]: E0123 10:49:14.789654 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:15.789634377 +0000 UTC m=+20.414314236 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.800142 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.803556 4689 scope.go:117] "RemoveContainer" containerID="ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.804251 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.812254 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.828528 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"web
hook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.842704 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"
volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' 
detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.860048 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.877019 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.899721 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.918442 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.934376 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:14 crc kubenswrapper[4689]: I0123 10:49:14.950789 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:14Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.303920 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-rcnzm"] Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.304410 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.306231 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.306542 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.307780 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.311467 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-sp7sf"] Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.311820 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.313986 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.313993 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.314284 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.314297 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.314293 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.328801 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23
T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.344528 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.357372 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.366230 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.377645 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.390754 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.413035 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.431802 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.452571 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.467851 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.488864 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.491973 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3d8de6cc-a03d-468b-bfe9-fbf544087653-rootfs\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.492021 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3d8de6cc-a03d-468b-bfe9-fbf544087653-proxy-tls\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.492061 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d8de6cc-a03d-468b-bfe9-fbf544087653-mcd-auth-proxy-config\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.492103 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjrzx\" (UniqueName: \"kubernetes.io/projected/261874ce-6002-4c08-b8f4-d507aae12d40-kube-api-access-sjrzx\") pod \"node-resolver-rcnzm\" (UID: \"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.492187 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7c5p\" (UniqueName: \"kubernetes.io/projected/3d8de6cc-a03d-468b-bfe9-fbf544087653-kube-api-access-x7c5p\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.492229 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/261874ce-6002-4c08-b8f4-d507aae12d40-hosts-file\") pod \"node-resolver-rcnzm\" (UID: 
\"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.508179 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.520922 4689 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.521397 4689 reflector.go:484] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.521486 4689 reflector.go:484] object-"openshift-machine-config-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.521558 4689 reflector.go:484] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": watch 
of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.521584 4689 reflector.go:484] object-"openshift-machine-config-operator"/"kube-rbac-proxy": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-rbac-proxy": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.521612 4689 reflector.go:484] object-"openshift-machine-config-operator"/"proxy-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"proxy-tls": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.522513 4689 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.521672 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-console/pods/networking-console-plugin-85b44fc459-gdk6g/status\": read tcp 38.102.83.179:43518->38.102.83.179:6443: use of closed network connection" Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.523313 4689 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: very short watch: object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.523466 4689 reflector.go:484] object-"openshift-dns"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.527047 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.545232 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.548466 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.567171 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593356 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3d8de6cc-a03d-468b-bfe9-fbf544087653-rootfs\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593414 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3d8de6cc-a03d-468b-bfe9-fbf544087653-proxy-tls\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593443 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d8de6cc-a03d-468b-bfe9-fbf544087653-mcd-auth-proxy-config\") pod \"machine-config-daemon-sp7sf\" (UID: 
\"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593472 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjrzx\" (UniqueName: \"kubernetes.io/projected/261874ce-6002-4c08-b8f4-d507aae12d40-kube-api-access-sjrzx\") pod \"node-resolver-rcnzm\" (UID: \"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593510 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7c5p\" (UniqueName: \"kubernetes.io/projected/3d8de6cc-a03d-468b-bfe9-fbf544087653-kube-api-access-x7c5p\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593539 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/261874ce-6002-4c08-b8f4-d507aae12d40-hosts-file\") pod \"node-resolver-rcnzm\" (UID: \"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593528 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3d8de6cc-a03d-468b-bfe9-fbf544087653-rootfs\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.593660 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/261874ce-6002-4c08-b8f4-d507aae12d40-hosts-file\") pod \"node-resolver-rcnzm\" (UID: \"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.594403 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3d8de6cc-a03d-468b-bfe9-fbf544087653-mcd-auth-proxy-config\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.598597 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3d8de6cc-a03d-468b-bfe9-fbf544087653-proxy-tls\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.599335 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 13:11:21.18358741 +0000 UTC Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.605714 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.626290 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7c5p\" (UniqueName: \"kubernetes.io/projected/3d8de6cc-a03d-468b-bfe9-fbf544087653-kube-api-access-x7c5p\") pod \"machine-config-daemon-sp7sf\" (UID: \"3d8de6cc-a03d-468b-bfe9-fbf544087653\") " pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.627055 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjrzx\" (UniqueName: \"kubernetes.io/projected/261874ce-6002-4c08-b8f4-d507aae12d40-kube-api-access-sjrzx\") pod \"node-resolver-rcnzm\" (UID: \"261874ce-6002-4c08-b8f4-d507aae12d40\") " pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.634771 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 
10:49:15.639700 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.639823 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.640198 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.640250 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.640669 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-23 10:44:14 +0000 UTC, rotation deadline is 2026-10-26 09:45:04.77949749 +0000 UTC
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.640744 4689 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6622h55m49.13875501s for next certificate rotation
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.643593 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.644182 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.645760 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.646520 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.647786 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.648473 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.649260 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Jan 23 10:49:15 crc
kubenswrapper[4689]: I0123 10:49:15.650385 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.651080 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.651328 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.652055 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.652592 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.655544 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.656080 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.656671 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.657760 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.658353 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.659437 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.659853 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.661821 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" 
path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.662453 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.663635 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.664268 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.664734 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.665827 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.666286 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.670251 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.671121 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.672123 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.672778 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.673826 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.674410 4689 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.674542 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.674748 4689 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.676825 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" 
path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.677351 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.678011 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.681283 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.682086 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.682688 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.684170 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.685051 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.687808 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.688599 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.689932 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.691221 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.691324 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.691732 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.692863 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.693457 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.693827 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.694033 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:49:17.694013424 +0000 UTC m=+22.318693283 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.694615 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.695500 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.696401 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.697578 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.698323 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.699105 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.700545 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes"
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.701107 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.704627 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.720024 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-xxklh"]
Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.720434 4689 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.720969 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.723822 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.723822 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.732585 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.732755 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.732778 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-llpck"] Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.733280 4689 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.734161 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.737862 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.742540 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.763246 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.765046 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751"} Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.765111 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.766675 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073"} Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.771352 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.790341 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.795255 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.795287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.795310 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.795331 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795407 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795462 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:17.795444827 +0000 UTC m=+22.420124686 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795471 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795480 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795510 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795516 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:17.795504188 +0000 UTC m=+22.420184037 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795523 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795526 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795553 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795565 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795587 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:17.79557178 +0000 UTC m=+22.420251639 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:15 crc kubenswrapper[4689]: E0123 10:49:15.795612 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:17.795597361 +0000 UTC m=+22.420277220 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.806227 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":
true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.823198 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is 
after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.841985 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.859274 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.880955 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896128 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-os-release\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896362 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4776\" (UniqueName: \"kubernetes.io/projected/4127aca0-3ce5-49a4-87e9-34f927efd502-kube-api-access-g4776\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896451 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-bin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896534 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-hostroot\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896609 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-conf-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc 
kubenswrapper[4689]: I0123 10:49:15.896670 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896721 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-cnibin\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896952 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9dgt\" (UniqueName: \"kubernetes.io/projected/d5f32f36-d66c-4202-ac54-e81c6d978146-kube-api-access-r9dgt\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.896984 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-system-cni-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 
23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897029 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-binary-copy\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897050 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-socket-dir-parent\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897066 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-multus-certs\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897094 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897111 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-cni-binary-copy\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897130 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-daemon-config\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897167 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-os-release\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897197 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-k8s-cni-cncf-io\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897238 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-multus\") pod \"multus-xxklh\" (UID: 
\"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897286 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-tuning-conf-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897305 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-etc-kubernetes\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897322 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-cnibin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897338 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-kubelet\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897353 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-netns\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897369 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.897387 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-system-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.910658 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.915982 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rcnzm" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.922758 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.925165 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: W0123 10:49:15.937844 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d8de6cc_a03d_468b_bfe9_fbf544087653.slice/crio-021bb2ca3827449a8d84f5d6b1267a657a9095d8ff5e50b82797ca48576fb617 WatchSource:0}: Error finding container 021bb2ca3827449a8d84f5d6b1267a657a9095d8ff5e50b82797ca48576fb617: Status 404 returned error can't find the container with id 021bb2ca3827449a8d84f5d6b1267a657a9095d8ff5e50b82797ca48576fb617 Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.949144 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.963769 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.979172 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:15 crc kubenswrapper[4689]: I0123 10:49:15.994439 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999612 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-system-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999668 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-os-release\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999696 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4776\" (UniqueName: \"kubernetes.io/projected/4127aca0-3ce5-49a4-87e9-34f927efd502-kube-api-access-g4776\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999722 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-bin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999771 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-hostroot\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999791 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-conf-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999822 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-system-cni-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999843 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-cnibin\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999867 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9dgt\" (UniqueName: \"kubernetes.io/projected/d5f32f36-d66c-4202-ac54-e81c6d978146-kube-api-access-r9dgt\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999891 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-binary-copy\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999914 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-socket-dir-parent\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999936 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-multus-certs\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:15.999958 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-cni-binary-copy\") pod \"multus-xxklh\" (UID: 
\"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000005 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000032 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-daemon-config\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000063 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-os-release\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000092 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-k8s-cni-cncf-io\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000115 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-multus\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000165 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-tuning-conf-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000229 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-cnibin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000266 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-kubelet\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000288 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-etc-kubernetes\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 
10:49:16.000322 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000346 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-netns\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000426 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-netns\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000521 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-multus-certs\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000633 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-system-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-os-release\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000941 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-kubelet\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.000939 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-multus\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001182 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-var-lib-cni-bin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001228 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-hostroot\") pod \"multus-xxklh\" (UID: 
\"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001261 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-conf-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-system-cni-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001325 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-cnibin\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001369 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4127aca0-3ce5-49a4-87e9-34f927efd502-tuning-conf-dir\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001524 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-socket-dir-parent\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001565 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-etc-kubernetes\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001645 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-os-release\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001675 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-cnibin\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001698 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-host-run-k8s-cni-cncf-io\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.001707 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-cni-dir\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.002103 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4127aca0-3ce5-49a4-87e9-34f927efd502-cni-binary-copy\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.002189 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-cni-binary-copy\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.002288 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5f32f36-d66c-4202-ac54-e81c6d978146-multus-daemon-config\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.021458 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4776\" (UniqueName: \"kubernetes.io/projected/4127aca0-3ce5-49a4-87e9-34f927efd502-kube-api-access-g4776\") pod \"multus-additional-cni-plugins-llpck\" (UID: \"4127aca0-3ce5-49a4-87e9-34f927efd502\") " pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.021534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9dgt\" (UniqueName: \"kubernetes.io/projected/d5f32f36-d66c-4202-ac54-e81c6d978146-kube-api-access-r9dgt\") pod \"multus-xxklh\" (UID: \"d5f32f36-d66c-4202-ac54-e81c6d978146\") " pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.022111 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-contr
oller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.038359 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.044372 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-xxklh" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.050132 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-llpck" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.052334 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: W0123 10:49:16.071708 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4127aca0_3ce5_49a4_87e9_34f927efd502.slice/crio-cc95134ced18b85077c7b77b9a8d95eeb6e9dab4c91a47630233a59e0777ba71 WatchSource:0}: Error finding container cc95134ced18b85077c7b77b9a8d95eeb6e9dab4c91a47630233a59e0777ba71: Status 404 returned error can't find the container with id cc95134ced18b85077c7b77b9a8d95eeb6e9dab4c91a47630233a59e0777ba71 Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.077958 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.103262 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.114009 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jpm9c"] Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.115040 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120368 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120517 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120585 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120689 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120686 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120795 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.120791 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.122702 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.133873 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.147863 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.162402 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.176253 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.192751 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.212609 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.232005 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.249125 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.269113 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.298192 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303491 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303529 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303544 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303562 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303597 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303612 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303627 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303643 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkjb8\" (UniqueName: \"kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303659 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303675 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303688 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303703 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303734 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303755 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303778 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303796 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303821 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303840 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303865 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.303882 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.336482 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.353817 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.368775 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-contr
oller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.385028 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.403390 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404644 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404684 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404703 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404724 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404743 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404778 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404802 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404844 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404857 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404869 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404937 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404981 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404991 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404964 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404933 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd\") pod 
\"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.404959 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405031 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405047 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405198 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405223 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405279 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405305 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405300 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkjb8\" (UniqueName: \"kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405374 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc 
kubenswrapper[4689]: I0123 10:49:16.405373 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405435 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405459 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405483 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405507 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.405533 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.407654 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.408035 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.408436 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.408540 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" 
(UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.409164 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.409235 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.409385 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.414277 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.423720 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.426350 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.428122 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkjb8\" (UniqueName: \"kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8\") pod \"ovnkube-node-jpm9c\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") " pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.445203 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.461334 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.476617 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.497511 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.506865 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.523546 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.526028 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.542794 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:16 crc kubenswrapper[4689]: W0123 10:49:16.555836 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5206d70b_3d3b_404c_b969_713242a23d38.slice/crio-7df3457bf767010ab3e580b904b15f4ac21e21ff213952416ecee3116ae10999 WatchSource:0}: Error finding container 7df3457bf767010ab3e580b904b15f4ac21e21ff213952416ecee3116ae10999: Status 404 returned error can't find the container with id 7df3457bf767010ab3e580b904b15f4ac21e21ff213952416ecee3116ae10999 Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.570205 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name
\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdf
df57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.585262 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.588076 4689 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.599511 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 18:04:53.710744276 +0000 UTC Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.599579 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.611197 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.635397 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host
-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.639363 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:16 crc kubenswrapper[4689]: E0123 10:49:16.639507 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.771520 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641" exitCode=0 Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.771670 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.771821 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerStarted","Data":"cc95134ced18b85077c7b77b9a8d95eeb6e9dab4c91a47630233a59e0777ba71"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.773363 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerStarted","Data":"66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.773411 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerStarted","Data":"f2eb8f711a7c66837a0ee1b6048b9bdcfc5ce5295b4921a70777bb1b11ab1013"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.774830 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"7df3457bf767010ab3e580b904b15f4ac21e21ff213952416ecee3116ae10999"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.776848 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.776974 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.777078 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" 
event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"021bb2ca3827449a8d84f5d6b1267a657a9095d8ff5e50b82797ca48576fb617"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.778262 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rcnzm" event={"ID":"261874ce-6002-4c08-b8f4-d507aae12d40","Type":"ContainerStarted","Data":"413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.778330 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rcnzm" event={"ID":"261874ce-6002-4c08-b8f4-d507aae12d40","Type":"ContainerStarted","Data":"0740172dd44efc2312fa0df9d93467c4d5813c7a38a71035b985304326974173"} Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.790012 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.803054 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.815923 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.828449 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.843541 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.860883 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.894421 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.937111 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.977276 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:16Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:16 crc kubenswrapper[4689]: I0123 10:49:16.986975 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.026810 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.067424 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.084717 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.120931 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.126099 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.181778 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.223340 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-de
v/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9b
e8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.255757 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.297554 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.334758 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.376591 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.415312 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.454138 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.496821 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.536369 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.574077 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.599668 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 00:34:14.170936327 +0000 UTC Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.618010 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc 
kubenswrapper[4689]: I0123 10:49:17.625536 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-rmzx9"]
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.626043 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-rmzx9"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.639338 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.639395 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.639491 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.639630 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.649420 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.667239 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.686971 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.706493 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.720897 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.721016 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:49:21.720998186 +0000 UTC m=+26.345678045 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.736865 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.773565 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.782084 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a"} Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.783749 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f" exitCode=0 Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.783801 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"} Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.786500 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff" exitCode=0 Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.786553 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff"} Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821722 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821777 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5da240e2-abc7-443a-89d5-65297d2a14ea-serviceca\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821830 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5da240e2-abc7-443a-89d5-65297d2a14ea-host\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821853 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4lg8\" (UniqueName: \"kubernetes.io/projected/5da240e2-abc7-443a-89d5-65297d2a14ea-kube-api-access-k4lg8\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821874 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821897 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.821919 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822013 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822070 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:21.822054059 +0000 UTC m=+26.446733918 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822468 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822536 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:21.82251686 +0000 UTC m=+26.447196909 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822528 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822575 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822591 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822676 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:21.822639753 +0000 UTC m=+26.447319792 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822734 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822765 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822782 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:17 crc kubenswrapper[4689]: E0123 10:49:17.822850 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:21.822823168 +0000 UTC m=+26.447503047 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.824231 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.862812 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.895868 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.922608 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5da240e2-abc7-443a-89d5-65297d2a14ea-host\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.923024 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5da240e2-abc7-443a-89d5-65297d2a14ea-serviceca\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.923296 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4lg8\" (UniqueName: \"kubernetes.io/projected/5da240e2-abc7-443a-89d5-65297d2a14ea-kube-api-access-k4lg8\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.922975 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5da240e2-abc7-443a-89d5-65297d2a14ea-host\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.926654 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5da240e2-abc7-443a-89d5-65297d2a14ea-serviceca\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.936657 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.965313 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4lg8\" (UniqueName: \"kubernetes.io/projected/5da240e2-abc7-443a-89d5-65297d2a14ea-kube-api-access-k4lg8\") pod \"node-ca-rmzx9\" (UID: \"5da240e2-abc7-443a-89d5-65297d2a14ea\") " pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:17 crc kubenswrapper[4689]: I0123 10:49:17.994469 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:17Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.035399 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.083174 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.122826 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441e
cd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.158470 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.195716 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.234358 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.238004 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-rmzx9" Jan 23 10:49:18 crc kubenswrapper[4689]: W0123 10:49:18.253250 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5da240e2_abc7_443a_89d5_65297d2a14ea.slice/crio-e3e3c5340b78115f47fed075c07cf5834da53d9ca245a03310e4cd493b8ca511 WatchSource:0}: Error finding container e3e3c5340b78115f47fed075c07cf5834da53d9ca245a03310e4cd493b8ca511: Status 404 returned error can't find the container with id e3e3c5340b78115f47fed075c07cf5834da53d9ca245a03310e4cd493b8ca511 Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.278221 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.321003 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.355469 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.397053 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.435371 4689 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.437109 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.437164 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.437178 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.437363 4689 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 10:49:18 crc 
kubenswrapper[4689]: I0123 10:49:18.440183 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4
776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started
\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.488135 4689 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.488252 4689 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.489898 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.489942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.489958 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.489977 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.489992 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.509067 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.513418 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.513451 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.513460 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.513509 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.513521 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.516699 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.525289 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.529741 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.529772 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.529782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.529796 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.529808 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.540546 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.544390 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.544426 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.544438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.544454 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.544466 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.554177 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556327 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556868 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556919 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556936 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.556947 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.569604 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.569775 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.571474 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.571514 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.571523 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.571537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.571549 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.599066 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.599887 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 20:23:40.733808862 +0000 UTC Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.603082 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.615031 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.636445 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.639342 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:18 crc kubenswrapper[4689]: E0123 10:49:18.639491 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.674226 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.674265 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.674278 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.674295 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.674306 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.678895 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.727328 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"fi
nishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.767938 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.777309 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.777378 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.777389 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.777404 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.777416 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.795138 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d" exitCode=0 Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.795205 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.796693 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-rmzx9" event={"ID":"5da240e2-abc7-443a-89d5-65297d2a14ea","Type":"ContainerStarted","Data":"5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.796730 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-rmzx9" event={"ID":"5da240e2-abc7-443a-89d5-65297d2a14ea","Type":"ContainerStarted","Data":"e3e3c5340b78115f47fed075c07cf5834da53d9ca245a03310e4cd493b8ca511"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800769 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800842 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800860 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800887 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.800898 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.807993 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.835186 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881122 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881185 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881198 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881215 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881227 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.881196 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.915276 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.954654 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.983504 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.983537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.983545 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.983560 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.983569 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:18Z","lastTransitionTime":"2026-01-23T10:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:18 crc kubenswrapper[4689]: I0123 10:49:18.995784 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:18Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.034304 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.075285 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.086042 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.086105 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.086123 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.086170 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.086187 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.112389 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",
\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.160606 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.189711 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.189768 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.189792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.189821 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.189843 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.200739 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apis
erver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.240121 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.279707 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.292022 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.292088 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.292104 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.292124 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.292136 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.317530 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.362655 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.395069 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.395103 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.395113 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.395128 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.395140 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.397495 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.438694 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.511215 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.511256 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.511272 4689 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.511286 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.511295 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.516349 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.540395 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\
\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.555213 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 
2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.597843 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.600888 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 18:27:46.808288875 +0000 UTC Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.613006 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.613050 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.613061 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.613076 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.613089 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.638501 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.639814 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.639827 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:19 crc kubenswrapper[4689]: E0123 10:49:19.640018 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:19 crc kubenswrapper[4689]: E0123 10:49:19.640171 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.674179 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1
bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.716087 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.716133 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.716142 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.716172 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.716183 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.717794 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.754521 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.807453 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac" exitCode=0 Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.807517 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.819127 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.819200 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.819216 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.819239 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.819253 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.827711 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.843442 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.878551 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.920825 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.923792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.923826 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.923837 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.923857 4689 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.923871 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:19Z","lastTransitionTime":"2026-01-23T10:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:19 crc kubenswrapper[4689]: I0123 10:49:19.954441 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.000879 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:19Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.028211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.028277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.028289 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.028313 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.028327 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.045190 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.077566 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.114619 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.130769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.130818 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.130833 4689 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.130854 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.130869 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.153834 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.197021 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 
2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.234088 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.234124 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.234132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.234158 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.234196 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.235011 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-c
ontroller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.273983 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.317041 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.336451 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.336493 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.336500 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.336531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.336541 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.359122 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.439075 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.439118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.439127 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.439143 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.439172 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.541193 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.541226 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.541235 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.541249 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.541259 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.601256 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 19:32:38.692797076 +0000 UTC Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.639612 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:20 crc kubenswrapper[4689]: E0123 10:49:20.639761 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.642857 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.642887 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.642895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.642907 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.642915 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.745757 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.745811 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.745824 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.745842 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.745858 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.815329 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.819252 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2" exitCode=0 Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.819298 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.834797 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.848828 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.849125 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.849230 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.849260 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.849270 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.860588 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.876823 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.896975 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"fi
nishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.909657 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.920389 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.935534 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.956885 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.958024 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.958058 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.958069 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.958088 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.958101 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:20Z","lastTransitionTime":"2026-01-23T10:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.970424 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:20 crc kubenswrapper[4689]: I0123 10:49:20.984925 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
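Every status_manager.go failure in this stretch has the same root cause: the pod.network-node-identity.openshift.io webhook serving on https://127.0.0.1:9743 presents a certificate whose NotAfter is 2025-08-24T17:21:41Z, while the kubelet's clock reads 2026-01-23, so Go's TLS client rejects the handshake with "x509: certificate has expired or is not yet valid". Note the contrast with the kubelet's own serving certificate, which per the certificate_manager.go record above remains valid until 2026-02-24. A self-contained Go sketch of the validity-window check behind this error; the self-signed certificate, common name, and fixed clock below are fabricated for illustration:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Build a throwaway self-signed cert whose validity window ends in the
	// past, mimicking the expired webhook serving certificate in the log.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	notAfter := time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC) // NotAfter from the log
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "network-node-identity.example"}, // hypothetical CN
		NotBefore:    notAfter.Add(-365 * 24 * time.Hour),
		NotAfter:     notAfter,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	// This is the comparison that makes the TLS handshake fail with
	// "x509: certificate has expired or is not yet valid".
	now := time.Date(2026, 1, 23, 10, 49, 20, 0, time.UTC) // the kubelet's clock
	if now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
}
```

In a full chain verification, crypto/x509's Certificate.Verify performs the same NotBefore/NotAfter comparison (against VerifyOptions.CurrentTime, defaulting to the system clock) and returns a CertificateInvalidError whose reason is Expired.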
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.001741 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:20Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.015246 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.027576 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.042894 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.057428 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.060915 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.060948 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.060958 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.060972 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.060983 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.164092 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.164132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.164141 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.164178 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.164190 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.266642 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.266681 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.266691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.266709 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.266722 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.369451 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.369519 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.369537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.369569 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.369592 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.472429 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.472488 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.472505 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.472535 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.472551 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.575501 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.575556 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.575574 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.575601 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.575895 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.602076 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 22:23:24.671658023 +0000 UTC Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.639707 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.639778 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.639877 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.639935 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.678623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.678681 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.678692 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.678713 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.678726 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.763978 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.764337 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-23 10:49:29.764297283 +0000 UTC m=+34.388977142 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.781881 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.781935 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.781949 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.781973 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.781991 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.826746 4689 generic.go:334] "Generic (PLEG): container finished" podID="4127aca0-3ce5-49a4-87e9-34f927efd502" containerID="bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0" exitCode=0 Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.826818 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerDied","Data":"bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.856652 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.865676 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.865818 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.865856 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.865909 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866754 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866794 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866834 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866850 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866844 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-23 10:49:29.866824354 +0000 UTC m=+34.491504213 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.866915 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:29.866892766 +0000 UTC m=+34.491572625 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867051 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867133 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:29.867084221 +0000 UTC m=+34.491764090 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867586 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867636 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867657 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:21 crc kubenswrapper[4689]: E0123 10:49:21.867737 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:29.867709686 +0000 UTC m=+34.492389575 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.884565 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.890740 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.890779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.890789 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.890805 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.890818 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.908077 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.926118 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.942222 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.956094 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.974577 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.990185 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-23T10:49:21Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.993592 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.993628 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.993664 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.993686 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:21 crc kubenswrapper[4689]: I0123 10:49:21.993698 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:21Z","lastTransitionTime":"2026-01-23T10:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.003214 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.022537 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.038777 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.056170 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.071249 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.085296 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.097020 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.097082 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.097095 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.097115 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.097131 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.106099 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.199623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.199685 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.199704 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.199797 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.199813 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.303027 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.303083 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.303097 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.303115 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.303127 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.406182 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.406217 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.406226 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.406239 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.406249 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.508997 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.509076 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.509101 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.509132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.509189 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.602810 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 08:50:36.519251215 +0000 UTC Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.611766 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.611807 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.611819 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.611839 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.611852 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.639031 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:22 crc kubenswrapper[4689]: E0123 10:49:22.639131 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.714370 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.714401 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.714410 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.714423 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.714432 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.817209 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.817260 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.817272 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.817290 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.817303 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.835524 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" event={"ID":"4127aca0-3ce5-49a4-87e9-34f927efd502","Type":"ContainerStarted","Data":"532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.841007 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.841486 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.841618 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.863808 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.874493 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.875777 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.882183 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.894472 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.903822 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.921564 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.921593 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.921601 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.921613 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.921622 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:22Z","lastTransitionTime":"2026-01-23T10:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.925081 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.938196 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.951538 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.963824 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.974564 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.986536 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:22 crc kubenswrapper[4689]: I0123 10:49:22.998316 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:22Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.009448 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.020102 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.023840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.023862 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.023871 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.023884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.023894 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.034280 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:
49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.046331 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.071293 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"fi
nishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.085657 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.096239 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.108946 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126253 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126291 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126302 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126319 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126330 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.126540 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/r
un/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.137343 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.148141 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.159090 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.169237 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.177681 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.189505 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.205711 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.218113 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.228825 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.228878 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.228890 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.228908 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.228920 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.235869 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:
49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.247578 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:23Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.331897 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.331955 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.331969 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.331992 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.332006 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.435482 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.435548 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.435570 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.435594 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.435612 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.538895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.538947 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.538962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.538982 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.538995 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.603607 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 12:26:04.337969372 +0000 UTC Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.640210 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.640309 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:23 crc kubenswrapper[4689]: E0123 10:49:23.640458 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:23 crc kubenswrapper[4689]: E0123 10:49:23.640572 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.641778 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.641817 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.641832 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.641855 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.641870 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.744541 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.744584 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.744593 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.744613 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.744623 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.843670 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.846643 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.846679 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.846692 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.846707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.846718 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.949134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.949224 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.949243 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.949264 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:23 crc kubenswrapper[4689]: I0123 10:49:23.949280 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:23Z","lastTransitionTime":"2026-01-23T10:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.051687 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.051751 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.051769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.051792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.051811 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.154967 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.155037 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.155048 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.155066 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.155078 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.258111 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.258211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.258237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.258265 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.258286 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.360750 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.360791 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.360806 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.360821 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.360830 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.463180 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.463220 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.463229 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.463243 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.463253 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.565551 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.565592 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.565600 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.565614 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.565628 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.604468 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 10:34:27.4848301 +0000 UTC Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.639760 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:24 crc kubenswrapper[4689]: E0123 10:49:24.639883 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.667486 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.667528 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.667537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.667553 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.667563 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.770435 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.770476 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.770484 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.770518 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.770527 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.847059 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.872906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.872974 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.872995 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.873072 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.873094 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.975971 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.976056 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.976082 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.976114 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:24 crc kubenswrapper[4689]: I0123 10:49:24.976138 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:24Z","lastTransitionTime":"2026-01-23T10:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.079809 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.079863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.079873 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.079888 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.079898 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.182770 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.182826 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.182842 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.182864 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.182880 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.285941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.285982 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.285990 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.286003 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.286013 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.388202 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.388270 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.388283 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.388302 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.388318 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.490581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.490629 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.490638 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.490653 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.490662 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.593970 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.594052 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.594072 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.594137 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.594195 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.604557 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 09:19:22.201048472 +0000 UTC Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.639190 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.639190 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:25 crc kubenswrapper[4689]: E0123 10:49:25.639328 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:25 crc kubenswrapper[4689]: E0123 10:49:25.639643 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.673717 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.688082 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.696683 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.696722 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.696733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.696749 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.696763 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.703234 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.714820 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.746621 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics
-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mo
untPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.761039 4689 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.775314 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.793070 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.798905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.798935 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.798945 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.798960 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.798971 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.808662 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.820236 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.833079 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.853514 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/0.log" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.858925 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.862071 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f" exitCode=1 Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.862120 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.862801 4689 scope.go:117] "RemoveContainer" containerID="1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.878616 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.901384 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696
756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.903642 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.903707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.903725 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.903747 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.903764 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:25Z","lastTransitionTime":"2026-01-23T10:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.917115 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.940292 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.957000 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.973437 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:25 crc kubenswrapper[4689]: I0123 10:49:25.989376 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:25Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.006493 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.006537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.006548 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.006565 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.006576 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.010521 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a
96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.028530 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.042504 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.055206 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.074020 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.087495 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.102223 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.108411 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.108452 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.108461 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.108474 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.108483 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.122426 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.135208 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.144364 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.155292 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.211749 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.211801 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.211812 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.211832 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.211843 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.314074 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.314107 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.314115 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.314128 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.314137 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.416253 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.416289 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.416298 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.416311 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.416319 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.518771 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.518823 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.518840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.518863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.518878 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.630222 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 16:38:39.195536599 +0000 UTC Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.631861 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.631887 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.631896 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.631910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.631922 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.639556 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:26 crc kubenswrapper[4689]: E0123 10:49:26.639676 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.734211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.734246 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.734254 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.734269 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.734280 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.837145 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.837237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.837257 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.837280 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.837296 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.869657 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/0.log" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.876903 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.877076 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.897907 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.917135 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.935756 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.940922 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.941140 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.941703 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.942050 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.942492 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:26Z","lastTransitionTime":"2026-01-23T10:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.954319 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.976977 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:26 crc kubenswrapper[4689]: I0123 10:49:26.997690 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:26Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.025668 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.045782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.045843 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.045853 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.045874 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.045889 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.049856 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.072183 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.097313 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582
eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.111838 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.144939 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"r
eadOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"
containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.149076 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.149105 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.149117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.149134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.149165 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.171998 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.189051 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.206367 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.252603 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.252670 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.252688 4689 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.252712 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.252736 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.356782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.357360 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.357950 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.358063 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.358174 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.461465 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.464559 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.464912 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.465274 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.465414 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.568688 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.568733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.568742 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.568758 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.568768 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.630879 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 02:41:54.382856362 +0000 UTC Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.639277 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:27 crc kubenswrapper[4689]: E0123 10:49:27.639664 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.639336 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:27 crc kubenswrapper[4689]: E0123 10:49:27.640397 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.671982 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.672463 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.672775 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.672933 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.673340 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.776197 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.776241 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.776253 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.776273 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.776286 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.878928 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.879277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.879374 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.879457 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.879526 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.900268 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.915237 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\"
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.949389 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\
"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping 
reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.972015 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.993361 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.993673 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.993827 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.994069 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.994983 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:27Z","lastTransitionTime":"2026-01-23T10:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:27 crc kubenswrapper[4689]: I0123 10:49:27.996722 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:27Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.016557 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.031880 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.047885 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.066033 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.079612 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.094093 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.098221 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.098267 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.098278 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.098333 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.098351 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.109292 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.128346 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.149371 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.165657 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.173521 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn"] Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.174203 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.176546 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.176837 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.193122 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.200727 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.200802 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.200828 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.200899 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.200929 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.216353 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.235436 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.244476 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wb44\" (UniqueName: \"kubernetes.io/projected/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-kube-api-access-2wb44\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.244510 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.244530 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.244548 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.252913 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.274265 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.288458 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.302466 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.303698 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.303774 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.303792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.303818 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.303833 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.313577 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.330639 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics
-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.341803 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.345205 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wb44\" (UniqueName: \"kubernetes.io/projected/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-kube-api-access-2wb44\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.345290 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.345347 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" 
Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.345383 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.346138 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.346361 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.355857 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.361445 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.365873 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wb44\" (UniqueName: \"kubernetes.io/projected/67c055d4-ebe7-45e0-b8fa-b597e3c7350e-kube-api-access-2wb44\") pod \"ovnkube-control-plane-749d76644c-wvgzn\" (UID: \"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.377084 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.389369 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.401654 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.407436 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.407470 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.407480 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.407496 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.407508 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.413927 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.426009 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.442503 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.488618 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" Jan 23 10:49:28 crc kubenswrapper[4689]: W0123 10:49:28.502276 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67c055d4_ebe7_45e0_b8fa_b597e3c7350e.slice/crio-bf529c6329d01ce6bc51233890a4253a45f8fa1eda0a75a5ead3218d933f05d0 WatchSource:0}: Error finding container bf529c6329d01ce6bc51233890a4253a45f8fa1eda0a75a5ead3218d933f05d0: Status 404 returned error can't find the container with id bf529c6329d01ce6bc51233890a4253a45f8fa1eda0a75a5ead3218d933f05d0 Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.509428 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.509498 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.509518 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.509555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.509571 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.612864 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.612921 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.612934 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.612957 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.612978 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.632183 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 22:40:37.46322578 +0000 UTC Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.639613 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.639766 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.651132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.651186 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.651195 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.651211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.651221 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.667991 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.675416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.675461 4689 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.675472 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.675487 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.675496 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.687870 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.691520 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.691589 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.691607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.691633 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.691651 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.704405 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.708327 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.708374 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.708392 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.708415 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.708430 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.721511 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.724941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.724985 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.724996 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.725013 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.725028 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.741342 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.741571 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.744733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
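Every attempt in the retry loop above fails for the same reason: the node-status patch must pass the node.network-node-identity.openshift.io validating webhook, and the webhook's serving certificate at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, well before the node's current clock of 2026-01-23T10:49:28Z. This is the typical symptom of resuming a CRC/OpenShift Local VM after the cluster's internal certificates have lapsed; the kubelet cannot persist node status until the certificate is reissued. A minimal Go sketch for inspecting the certificate's validity window independently of the kubelet (the address comes straight from the log line; InsecureSkipVerify is deliberate, since a verifying handshake would fail with the same x509 error seen above):

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Webhook address taken from the kubelet error; adjust if it listens elsewhere.
	const addr = "127.0.0.1:9743"

	// Skip verification on purpose: a normal handshake would abort with the
	// same "x509: certificate has expired" error the kubelet reports.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial %s: %v", addr, err)
	}
	defer conn.Close()

	// Inspect the leaf certificate the webhook actually presented.
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("certificate is EXPIRED, matching the kubelet error")
	}
}

Run on the node itself, this prints the notBefore/notAfter window and flags the expiry that every status-patch attempt trips over.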
event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.744771 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.744783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.744796 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.744804 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.847213 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.847315 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.847342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.847401 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.847433 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.886477 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" event={"ID":"67c055d4-ebe7-45e0-b8fa-b597e3c7350e","Type":"ContainerStarted","Data":"bf529c6329d01ce6bc51233890a4253a45f8fa1eda0a75a5ead3218d933f05d0"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.888503 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/1.log" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.889328 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/0.log" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.892878 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52" exitCode=1 Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.892937 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.893026 4689 scope.go:117] "RemoveContainer" containerID="1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.893875 4689 scope.go:117] "RemoveContainer" containerID="063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52" Jan 23 10:49:28 crc kubenswrapper[4689]: E0123 10:49:28.894118 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\"" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.913847 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.929429 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.943687 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.951812 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.951847 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.951855 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.951869 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.951882 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:28Z","lastTransitionTime":"2026-01-23T10:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.968518 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 
10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:28 crc kubenswrapper[4689]: I0123 10:49:28.995272 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:28Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.011078 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.026049 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.040003 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.053567 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.054268 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.054314 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.054325 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.054342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.054356 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.068409 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z
\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.086278 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5
d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.100270 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.120890 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.141432 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.156881 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.156941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc 
kubenswrapper[4689]: I0123 10:49:29.156954 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.156976 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.156990 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.157565 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.172355 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:29Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.260230 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.260276 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.260289 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.260309 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.260327 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.363161 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.363208 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.363218 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.363235 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.363250 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.466426 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.466560 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.466584 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.466607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.466625 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.568920 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.568985 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.569007 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.569034 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.569053 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.632636 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 02:48:33.672353638 +0000 UTC Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.639302 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.639372 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.639517 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.639645 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.672463 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.672537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.672555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.672579 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.672597 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.766518 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.766774 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-23 10:49:45.766755625 +0000 UTC m=+50.391435484 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.776276 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.776340 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.776354 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.776375 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.776392 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.867988 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.868053 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.868168 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.868216 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868332 
4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868407 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868457 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868455 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868514 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868545 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868558 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:45.868523136 +0000 UTC m=+50.493203035 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868562 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868603 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:45.868572308 +0000 UTC m=+50.493252207 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868370 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868641 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:45.868626069 +0000 UTC m=+50.493306168 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:29 crc kubenswrapper[4689]: E0123 10:49:29.868676 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:49:45.8686564 +0000 UTC m=+50.493336469 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.879225 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.879260 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.879271 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.879287 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.879300 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.982497 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.982550 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.982565 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.982581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:29 crc kubenswrapper[4689]: I0123 10:49:29.982594 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:29Z","lastTransitionTime":"2026-01-23T10:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.043336 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-cpc6c"] Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.043859 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.043925 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.066623 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.069602 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.069815 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7zhk\" (UniqueName: \"kubernetes.io/projected/01ee7060-466f-4294-934f-3df3b9aa7afe-kube-api-access-z7zhk\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 
crc kubenswrapper[4689]: I0123 10:49:30.086458 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.086890 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.086942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.086955 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.086973 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.086986 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.105127 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.128433 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.154964 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc
2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.170283 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.170423 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7zhk\" (UniqueName: \"kubernetes.io/projected/01ee7060-466f-4294-934f-3df3b9aa7afe-kube-api-access-z7zhk\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.170470 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.170567 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.170614 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:49:30.67060152 +0000 UTC m=+35.295281369 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.188228 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.189540 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.189578 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.189590 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.189605 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.189615 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.195838 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7zhk\" (UniqueName: \"kubernetes.io/projected/01ee7060-466f-4294-934f-3df3b9aa7afe-kube-api-access-z7zhk\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.206368 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.223350 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 
10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.234268 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.250625 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.262327 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.278639 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.291220 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.293169 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.293204 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.293227 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.293242 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.293252 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.306604 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.319070 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.333019 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.396552 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc 
kubenswrapper[4689]: I0123 10:49:30.396593 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.396606 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.396623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.396636 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.499739 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.499841 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.499863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.499891 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.499910 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.602134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.602201 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.602215 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.602231 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.602241 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.633143 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 15:02:51.288641951 +0000 UTC Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.639662 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.639898 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.674836 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.675023 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:30 crc kubenswrapper[4689]: E0123 10:49:30.675136 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:49:31.675107761 +0000 UTC m=+36.299787660 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.705176 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.705223 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.705234 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.705248 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.705260 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.808494 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.808543 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.808554 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.808570 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.808580 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.904238 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/1.log" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911483 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911501 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911523 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911542 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:30Z","lastTransitionTime":"2026-01-23T10:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.911956 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" event={"ID":"67c055d4-ebe7-45e0-b8fa-b597e3c7350e","Type":"ContainerStarted","Data":"3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.912055 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" event={"ID":"67c055d4-ebe7-45e0-b8fa-b597e3c7350e","Type":"ContainerStarted","Data":"5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9"} Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.933822 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-co
nf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.958705 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Comp
leted\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.972657 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:30 crc kubenswrapper[4689]: I0123 10:49:30.996927 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"fi
nishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:30Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.011707 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.014050 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.014132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.014202 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.014227 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.014243 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.026355 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.039365 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.059859 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d33d9c33dfdda1f58f90c99430d672c3cf8ada193637a826e72aec184a7074f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:25Z\\\",\\\"message\\\":\\\"ub.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952000 5992 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:24.952068 5992 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:24.952208 5992 factory.go:656] Stopping watch factory\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 10:49:24.952345 5992 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0123 10:49:24.952411 5992 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:24.952477 5992 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:24.951757 5992 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:24.952530 5992 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952083 5992 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 10:49:24.952560 5992 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 
10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.075800 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.094570 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.112854 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.117001 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.117067 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.117091 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.117117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.117135 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.130743 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.146217 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.159955 4689 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.175795 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 
10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.195763 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.212837 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"contain
erID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information 
is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:31Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 
10:49:31.220333 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.220432 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.220458 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.220496 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.220519 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.323791 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.323850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.323861 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.323885 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.323905 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.426946 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.426997 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.427011 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.427032 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.427042 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.535241 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.535367 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.535411 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.535447 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.535474 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.634130 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 08:14:26.475465968 +0000 UTC Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639495 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639531 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:31 crc kubenswrapper[4689]: E0123 10:49:31.639669 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639702 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:31 crc kubenswrapper[4689]: E0123 10:49:31.639765 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639543 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639790 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639805 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.639816 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.640019 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:31 crc kubenswrapper[4689]: E0123 10:49:31.640072 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.686061 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:31 crc kubenswrapper[4689]: E0123 10:49:31.686225 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:31 crc kubenswrapper[4689]: E0123 10:49:31.686277 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:49:33.686262203 +0000 UTC m=+38.310942062 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.742834 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.742875 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.742886 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.742906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.742919 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.846768 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.846832 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.846844 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.846863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.846875 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.950794 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.950856 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.950873 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.950897 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:31 crc kubenswrapper[4689]: I0123 10:49:31.950915 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:31Z","lastTransitionTime":"2026-01-23T10:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.053645 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.053719 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.053734 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.053754 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.053766 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.157065 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.157118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.157133 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.157177 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.157194 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.260104 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.260170 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.260183 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.260233 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.260252 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.363343 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.363399 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.363415 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.363434 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.363448 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.466665 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.466724 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.466736 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.466753 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.466770 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.569112 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.569186 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.569203 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.569219 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.569231 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.634505 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 20:06:00.272217836 +0000 UTC Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.639792 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:32 crc kubenswrapper[4689]: E0123 10:49:32.639897 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.671271 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.671313 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.671324 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.671341 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.671352 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.773474 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.773562 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.773582 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.773608 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.773627 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.876644 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.876704 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.876715 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.876733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.876745 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.979971 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.980092 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.980118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.980188 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:32 crc kubenswrapper[4689]: I0123 10:49:32.980214 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:32Z","lastTransitionTime":"2026-01-23T10:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.083001 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.083044 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.083056 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.083074 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.083085 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.186706 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.186770 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.186782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.186804 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.186820 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.290300 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.290364 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.290374 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.290396 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.290407 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.393338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.393426 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.393450 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.393480 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.393506 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.496802 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.496888 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.496925 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.496958 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.496982 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.599609 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.599688 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.599705 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.599731 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.599749 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.636120 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 05:33:52.84568811 +0000 UTC Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.639681 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.639741 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.639695 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:33 crc kubenswrapper[4689]: E0123 10:49:33.639990 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:33 crc kubenswrapper[4689]: E0123 10:49:33.640139 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:33 crc kubenswrapper[4689]: E0123 10:49:33.640377 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.702796 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.702863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.702884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.702915 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.702938 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.708357 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:33 crc kubenswrapper[4689]: E0123 10:49:33.708578 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:33 crc kubenswrapper[4689]: E0123 10:49:33.708684 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:49:37.70865387 +0000 UTC m=+42.333333729 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.806625 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.806691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.806710 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.806736 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.806755 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.909753 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.909834 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.909861 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.909893 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:33 crc kubenswrapper[4689]: I0123 10:49:33.909921 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:33Z","lastTransitionTime":"2026-01-23T10:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.014236 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.014303 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.014327 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.014357 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.014378 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.116624 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.116652 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.116660 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.116673 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.116682 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.219504 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.219546 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.219557 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.219572 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.219585 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.322125 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.322178 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.322188 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.322201 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.322212 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.425129 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.425203 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.425220 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.425240 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.425255 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.528193 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.528261 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.528282 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.528306 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.528323 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.631548 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.631616 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.631639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.632300 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.632328 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.636970 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 21:40:58.188797475 +0000 UTC
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.639329 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:49:34 crc kubenswrapper[4689]: E0123 10:49:34.639488 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.734964 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.735002 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.735011 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.735029 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.735041 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.837192 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.837238 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.837247 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.837263 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.837273 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.940409 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.940721 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.940742 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.940769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:34 crc kubenswrapper[4689]: I0123 10:49:34.940789 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:34Z","lastTransitionTime":"2026-01-23T10:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.045965 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.046044 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.046057 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.046078 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.046092 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.114717 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.115956 4689 scope.go:117] "RemoveContainer" containerID="063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52" Jan 23 10:49:35 crc kubenswrapper[4689]: E0123 10:49:35.116241 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\"" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.135611 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c3
9ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a22959
25701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.149266 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.149311 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.149323 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.149340 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.149352 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.155377 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.176049 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.202815 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.221669 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.236420 4689 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.252083 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.252127 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.252186 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.252208 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.252221 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.270214 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 
10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.283536 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.307865 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.322275 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.337419 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.349669 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.354862 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.354902 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.354914 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.354930 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.354942 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.361628 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.373631 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 
10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.385964 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.399035 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.416606 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.458489 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.458551 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.458564 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.458587 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.458599 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.562225 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.562286 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.562296 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.562319 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.562331 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.637204 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 19:19:07.773499552 +0000 UTC Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.639586 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:35 crc kubenswrapper[4689]: E0123 10:49:35.639800 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.639633 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:35 crc kubenswrapper[4689]: E0123 10:49:35.640268 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.639605 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:35 crc kubenswrapper[4689]: E0123 10:49:35.640453 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.658139 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee
1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.665456 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.665513 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.665552 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.665569 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.665581 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.673467 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.686310 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.700242 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582
eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.726628 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedc
b49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.737249 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.765805 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.767542 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.767655 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.767736 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.767810 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.767877 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.781135 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.798087 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.810623 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.825434 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.839272 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 
10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.851996 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.867094 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.869884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.869914 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.869924 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.869939 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.869951 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.879479 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.891132 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.901389 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:35Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.971782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.971807 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.971814 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.971827 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:35 crc kubenswrapper[4689]: I0123 10:49:35.971838 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:35Z","lastTransitionTime":"2026-01-23T10:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.074209 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.074245 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.074254 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.074266 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.074276 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.176902 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.176979 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.177000 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.177021 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.177038 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.279937 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.279974 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.279986 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.280033 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.280046 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.383048 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.383131 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.383194 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.383220 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.383238 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.486954 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.487021 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.487043 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.487071 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.487094 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.590220 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.590339 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.590361 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.590434 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.590455 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.638265 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 16:09:58.267399364 +0000 UTC Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.639518 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:36 crc kubenswrapper[4689]: E0123 10:49:36.639738 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.725652 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.725731 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.725754 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.725784 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.725807 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.828963 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.829028 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.829087 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.829132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.829202 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.930930 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.931014 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.931038 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.931117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:36 crc kubenswrapper[4689]: I0123 10:49:36.931138 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:36Z","lastTransitionTime":"2026-01-23T10:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.034064 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.034105 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.034112 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.034130 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.034140 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.136549 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.137023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.137248 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.137428 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.137585 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.241729 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.241790 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.241810 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.241834 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.241854 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.345112 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.345189 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.345203 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.345231 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.345240 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.448603 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.449056 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.449258 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.449403 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.449569 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.552662 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.552699 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.552707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.552722 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.552731 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.639695 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.639719 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.639726 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.640665 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 09:46:47.578961734 +0000 UTC
Jan 23 10:49:37 crc kubenswrapper[4689]: E0123 10:49:37.640536 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:37 crc kubenswrapper[4689]: E0123 10:49:37.640821 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:49:37 crc kubenswrapper[4689]: E0123 10:49:37.640987 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
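Every "failed calling webhook" entry above fails the same way: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2026-01-23. A minimal Go sketch (illustrative only, not part of the kubelet; the file name cert_probe.go is hypothetical) that dials that endpoint and prints the presented certificate's validity window, confirming the expiry independently of the kubelet:

    // cert_probe.go - dial the webhook endpoint named in the log above and
    // print each presented certificate's validity window.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
    )

    func main() {
        // 127.0.0.1:9743 is the webhook address taken from the log; it
        // listens on loopback, so this must run on the node itself.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // inspect only; verification is exactly what fails
        })
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%q notBefore=%s notAfter=%s\n",
                cert.Subject.CommonName, cert.NotBefore, cert.NotAfter)
        }
    }

If the notAfter printed here predates the node clock, every status patch that passes through this webhook will keep failing until the certificate is rotated or the clock is corrected.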
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.655438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.655500 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.655511 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.655528 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.655538 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.753565 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:37 crc kubenswrapper[4689]: E0123 10:49:37.753982 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:49:37 crc kubenswrapper[4689]: E0123 10:49:37.755019 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:49:45.754101833 +0000 UTC m=+50.378781732 (durationBeforeRetry 8s). 
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.759859 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.760231 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.760254 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.760278 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.760297 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.863243 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.863280 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.863291 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.863322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.863335 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
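The NotReady heartbeats repeated above and the "Error syncing pod" entries share one root cause: /etc/kubernetes/cni/net.d/ contains no CNI configuration until the network provider writes one. A trivial Go check (illustrative only; the file name cni_check.go is hypothetical, and it must run on the node) for whether that directory has been populated yet:

    // cni_check.go - list /etc/kubernetes/cni/net.d/, the directory the
    // NetworkPluginNotReady message above reports as empty.
    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        const confDir = "/etc/kubernetes/cni/net.d/"
        entries, err := os.ReadDir(confDir)
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        if len(entries) == 0 {
            fmt.Println("no CNI configuration files; kubelet will stay NotReady")
            return
        }
        for _, e := range entries {
            fmt.Println("found CNI config:", e.Name())
        }
    }

Once the network provider (here, the OVN-Kubernetes components this cluster is waiting on) drops its config file into that directory, the Ready condition flips and the pod sandboxes above can be created.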
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.966103 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.966135 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.966164 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.966177 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:37 crc kubenswrapper[4689]: I0123 10:49:37.966186 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:37Z","lastTransitionTime":"2026-01-23T10:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.068916 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.068999 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.069029 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.069063 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.069084 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.172458 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.172510 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.172532 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.172557 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.172579 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.276531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.276904 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.277135 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.277367 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.277577 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.380188 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.380262 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.380288 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.380318 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.380339 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.483620 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.483686 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.483705 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.483733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.483751 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.585769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.585835 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.585857 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.585884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.585905 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.638997 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:38 crc kubenswrapper[4689]: E0123 10:49:38.639239 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.641115 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 01:25:24.18053017 +0000 UTC Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.689554 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.689618 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.689634 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.689660 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.689680 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.794347 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.794432 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.794458 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.794499 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.794521 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.897272 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.897338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.897362 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.897391 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:38 crc kubenswrapper[4689]: I0123 10:49:38.897417 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:38Z","lastTransitionTime":"2026-01-23T10:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.000403 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.000475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.000495 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.000517 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.000534 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.063301 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.063363 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.063380 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.063404 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.063421 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.084587 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:39Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.090920 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.090982 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.091010 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.091034 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.091052 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.114521 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:39Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.119561 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.119776 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.119882 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.119963 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.120042 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.141207 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:39Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.146191 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.146258 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.146277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.146302 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.146322 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.161845 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:39Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.165797 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.165858 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.165870 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.165885 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.165897 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.185482 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:39Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.185791 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.187436 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
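[Editor's note: at this point the kubelet has exhausted its status-update retries. Every attempt failed for the same reason: the serving certificate of the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, long before the current clock of 2026-01-23T10:49:39Z. A minimal Go sketch that reads the expiry window straight off the endpoint follows; it is editor-added, not part of the log, and assumes Go is available on the node and the webhook is still listening on the port shown in the log.]

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint taken from the log line: Post "https://127.0.0.1:9743/node?timeout=10s"
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		// Deliberate: we want to inspect the certificate, not trust it,
		// and normal verification fails because it has already expired.
		InsecureSkipVerify: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// First peer certificate is the webhook's serving certificate.
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore)
	fmt.Printf("notAfter:  %s\n", cert.NotAfter)
	fmt.Printf("expired:   %v\n", time.Now().After(cert.NotAfter))
}

[If the diagnosis above is right, this prints a notAfter of 2025-08-24T17:21:41Z and expired: true, matching the x509 error in the retries.]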
event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.187483 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.187500 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.187518 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.187532 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.289694 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.290045 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.290321 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.290541 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.290722 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.393066 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.393106 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.393116 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.393127 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.393136 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.496170 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.496536 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.496603 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.496738 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.496812 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.599412 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.599555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.599575 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.599602 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.599624 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.638953 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.639035 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.639081 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.639195 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.639035 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:39 crc kubenswrapper[4689]: E0123 10:49:39.639276 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.641990 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 06:01:55.269058783 +0000 UTC Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.705346 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.705586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.705704 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.705805 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.705888 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.809316 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.809373 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.809390 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.809414 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.809432 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.912837 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.912908 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.912931 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.912963 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:39 crc kubenswrapper[4689]: I0123 10:49:39.912985 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:39Z","lastTransitionTime":"2026-01-23T10:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.016227 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.016298 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.016317 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.016340 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.016358 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.119429 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.119480 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.119498 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.119522 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.119967 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.222798 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.223228 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.223449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.223665 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.223857 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.328326 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.328391 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.328417 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.328455 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.328475 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.431621 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.431954 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.432041 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.432122 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.432257 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.535204 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.535514 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.535680 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.535789 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.535890 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639004 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639021 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: E0123 10:49:40.639198 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639276 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639334 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639361 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.639374 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.642909 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 22:53:00.37878896 +0000 UTC Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.742690 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.743098 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.743277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.743444 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.743626 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.845886 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.845957 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.845975 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.845995 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.846008 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.949256 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.949590 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.949806 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.949944 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:40 crc kubenswrapper[4689]: I0123 10:49:40.950068 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:40Z","lastTransitionTime":"2026-01-23T10:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.053393 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.053431 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.053442 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.053457 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.053468 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.156659 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.156729 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.156749 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.156779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.156804 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.259875 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.259937 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.259955 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.259978 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.259996 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.362103 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.362612 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.362752 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.362871 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.362957 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.465368 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.465778 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.465964 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.466180 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.466380 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.569833 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.569879 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.569895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.569917 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.569933 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.639387 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:41 crc kubenswrapper[4689]: E0123 10:49:41.639782 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.639577 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:41 crc kubenswrapper[4689]: E0123 10:49:41.640021 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.639419 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:41 crc kubenswrapper[4689]: E0123 10:49:41.640499 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.643284 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 22:59:08.459351361 +0000 UTC Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.672344 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.672602 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.672679 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.672771 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.672856 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.774465 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.774493 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.774502 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.774515 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.774524 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.876691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.876724 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.876734 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.876748 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.876758 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.979081 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.979438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.979587 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.979734 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:41 crc kubenswrapper[4689]: I0123 10:49:41.979861 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:41Z","lastTransitionTime":"2026-01-23T10:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.082099 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.082137 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.082169 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.082183 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.082193 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.184450 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.184687 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.184793 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.184888 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.184961 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.287309 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.287386 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.287420 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.287442 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.287455 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.390668 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.390723 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.390736 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.390754 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.390767 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.493667 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.493745 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.493769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.493798 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.493822 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.596835 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.596863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.596874 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.596888 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.596898 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.639042 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:42 crc kubenswrapper[4689]: E0123 10:49:42.639201 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.644284 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 20:23:46.258016065 +0000 UTC Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.700748 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.700825 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.700850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.700877 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.700894 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.748734 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.760209 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.768616 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.804315 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.804670 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.804876 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.805045 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.805251 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.806060 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49
117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.831567 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.851629 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.866022 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.895247 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.907829 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.907893 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.907915 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.907941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.907959 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:42Z","lastTransitionTime":"2026-01-23T10:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.913070 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.930270 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.949743 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.969051 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.982940 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:42 crc kubenswrapper[4689]: I0123 10:49:42.997948 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:42Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.009890 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:43Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.010261 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.010305 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.010334 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.010376 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.010400 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.029032 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9
7746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 
10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:43Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.045598 4689 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:43Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.061894 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:43Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.081988 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T10:49:43Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.112745 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.113010 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.113134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.113261 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.113338 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.217598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.217661 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.217683 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.217711 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.217731 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.320182 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.320554 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.320756 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.320949 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.321211 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.423713 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.423790 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.423809 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.423833 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.423852 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.526775 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.526808 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.526817 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.526830 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.526858 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.629060 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.629113 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.629124 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.629142 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.629177 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.639436 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.639464 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.639535 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:43 crc kubenswrapper[4689]: E0123 10:49:43.639668 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:49:43 crc kubenswrapper[4689]: E0123 10:49:43.639753 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:43 crc kubenswrapper[4689]: E0123 10:49:43.639829 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.644391 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 06:27:11.566450256 +0000 UTC
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.732865 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.732942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.732980 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.733059 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.733105 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.836023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.836089 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.836108 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.836140 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.836213 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.938758 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.938803 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.938813 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.938828 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:43 crc kubenswrapper[4689]: I0123 10:49:43.938841 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:43Z","lastTransitionTime":"2026-01-23T10:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.041576 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.041632 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.041645 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.041668 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.041692 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.144979 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.145026 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.145044 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.145066 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.145083 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.248962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.249023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.249048 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.249072 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.249089 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.351915 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.352005 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.352060 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.352079 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.352099 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.455906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.456040 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.456105 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.456133 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.456211 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.560559 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.560624 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.560640 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.560662 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.560676 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.638951 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:49:44 crc kubenswrapper[4689]: E0123 10:49:44.639219 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.644879 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 18:11:06.070145825 +0000 UTC
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.664281 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.664346 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.664358 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.664375 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.664386 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.766649 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.766689 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.766707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.766724 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.766735 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.870118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.870221 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.870247 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.870277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.870296 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.973655 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.973714 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.973752 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.973785 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:44 crc kubenswrapper[4689]: I0123 10:49:44.973806 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:44Z","lastTransitionTime":"2026-01-23T10:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.077586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.077657 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.077678 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.077707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.077729 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.180202 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.180263 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.180281 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.180305 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.180322 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.283097 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.283165 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.283182 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.283202 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.283215 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.390599 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.390648 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.390661 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.390680 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.390697 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.494189 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.494278 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.494307 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.494338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.494356 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.597620 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.597703 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.597731 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.597779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.597806 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.639007 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.639189 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.639307 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.639611 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.639685 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.640498 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.645295 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 02:17:06.023624469 +0000 UTC Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.654587 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\
\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.672315 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.689445 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.701026 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.701087 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.701107 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.701133 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.701184 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.711460 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.739077 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.770939 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc
2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.788452 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.804598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.804815 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.804953 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.805023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.805088 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.805606 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.818020 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.837262 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.840550 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.840714 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.840788 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:17.840759711 +0000 UTC m=+82.465439610 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.840862 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.840916 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:50:01.840906095 +0000 UTC m=+66.465585954 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.850521 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.866826 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.883003 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.898824 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.909850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.909973 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.909993 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.910464 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.910493 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:45Z","lastTransitionTime":"2026-01-23T10:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.922577 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.939221 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.941409 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.941472 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.941518 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.941551 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941640 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941672 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941685 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941732 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941747 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:50:17.941725292 +0000 UTC m=+82.566405151 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941795 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:50:17.941774454 +0000 UTC m=+82.566454323 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941655 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941909 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941948 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.941962 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.942050 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:50:17.941994049 +0000 UTC m=+82.566673908 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 10:49:45 crc kubenswrapper[4689]: E0123 10:49:45.942089 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:50:17.942079321 +0000 UTC m=+82.566759180 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.951557 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:45 crc kubenswrapper[4689]: I0123 10:49:45.966641 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:45Z is after 2025-08-24T17:21:41Z" Jan 23 
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.013547 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.013623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.013642 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.013671 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.013689 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.117278 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.117319 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.117330 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.117348 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.117360 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.220211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.220265 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.220284 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.220305 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.220320 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.322990 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.323044 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.323061 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.323082 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.323098 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.426807 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.426871 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.426892 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.426917 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.426935 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.530341 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.530414 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.530433 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.530462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.530481 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.634054 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.634112 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.634128 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.634186 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.634213 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.639863 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:49:46 crc kubenswrapper[4689]: E0123 10:49:46.640000 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.646070 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 14:58:47.757150989 +0000 UTC
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.736953 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.737043 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.737064 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.737092 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.737111 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.840297 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.840366 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.840390 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.840419 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.840441 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.944910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.944992 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.945008 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.945028 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:46 crc kubenswrapper[4689]: I0123 10:49:46.945040 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:46Z","lastTransitionTime":"2026-01-23T10:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.048020 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.048080 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.048097 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.048122 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.048140 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.151244 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.151308 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.151322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.151342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.151356 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.254950 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.255052 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.255067 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.255090 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.255104 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.357735 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.357772 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.357780 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.357794 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.357805 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.460707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.460744 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.460761 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.460775 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.460798 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.563230 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.563303 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.563325 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.563352 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.563371 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.639543 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.639633 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.639550 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:47 crc kubenswrapper[4689]: E0123 10:49:47.639716 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:47 crc kubenswrapper[4689]: E0123 10:49:47.639841 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:49:47 crc kubenswrapper[4689]: E0123 10:49:47.639969 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.641014 4689 scope.go:117] "RemoveContainer" containerID="063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.646459 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 05:20:29.152797796 +0000 UTC
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.670605 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.670953 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.670974 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.670999 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.671015 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.773282 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.773327 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.773338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.773355 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.773366 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.876739 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.876794 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.876810 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.876835 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.876851 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.971775 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/1.log" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.975637 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0"} Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.976084 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.978468 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.978523 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.978535 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.978549 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.978560 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:47Z","lastTransitionTime":"2026-01-23T10:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:47 crc kubenswrapper[4689]: I0123 10:49:47.990141 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:47Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.001745 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 
10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.015927 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.029777 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.051753 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.069248 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.080398 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.080437 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.080446 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.080461 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.080471 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.089629 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.114063 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.127105 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.146217 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.157705 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.171486 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.182456 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.182509 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.182524 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.182545 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.182560 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.193117 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87e
f826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 
10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.206527 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.226523 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.237438 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.253477 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.264658 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:48Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.284952 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.285078 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.285124 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.285164 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.285180 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.388070 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.388125 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.388135 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.388168 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.388181 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.490819 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.490874 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.490890 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.490909 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.490922 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.593343 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.593421 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.593443 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.593473 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.593495 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.639339 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:48 crc kubenswrapper[4689]: E0123 10:49:48.639521 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.646843 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 00:13:33.369417891 +0000 UTC Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.696808 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.696865 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.696883 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.696906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.696923 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.799111 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.799197 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.799210 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.799226 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.799238 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.902357 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.902422 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.902440 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.902464 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.902483 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:48Z","lastTransitionTime":"2026-01-23T10:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.981084 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/2.log" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.981867 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/1.log" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.985314 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0" exitCode=1 Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.985357 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0"} Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.985393 4689 scope.go:117] "RemoveContainer" containerID="063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52" Jan 23 10:49:48 crc kubenswrapper[4689]: I0123 10:49:48.988637 4689 scope.go:117] "RemoveContainer" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0" Jan 23 10:49:48 crc kubenswrapper[4689]: E0123 10:49:48.990774 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\"" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005017 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005352 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005383 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005397 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.005430 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.025739 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.046927 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.067770 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.094783 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.108736 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.108803 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.108822 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.108846 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.108863 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.148782 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87e
f826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://063a343392f95388af3f809520323e2c200caedcb49db72edce4ab488dc47e52\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI0123 10:49:26.749184 6119 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0123 10:49:26.749516 6119 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 10:49:26.749574 6119 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:26.749584 6119 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:26.749599 6119 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 10:49:26.749609 6119 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 10:49:26.749633 6119 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:26.749675 6119 factory.go:656] Stopping watch factory\\\\nI0123 10:49:26.749694 6119 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:26.749744 6119 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 10:49:26.749759 6119 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:26.749767 6119 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:26.749775 6119 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:26.749783 6119 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed 
*v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.187259 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.211574 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.211628 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.211792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.211837 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.211889 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.213300 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.233852 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.253814 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.265598 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.275561 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.289316 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 
10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.309010 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.315490 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.315586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.315607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.315638 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.315658 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.326620 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.343839 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.361599 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.374527 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.420018 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.420132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.420187 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.420217 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.420239 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.514936 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.514992 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.515014 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.515036 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.515050 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.531770 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.538326 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.538414 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.538439 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.538475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.538498 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.560676 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.565962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.566025 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.566045 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.566074 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.566097 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.588319 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.593605 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.593665 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.593681 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.593707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.593720 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.617931 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.661463 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 15:20:24.823938788 +0000 UTC Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.661945 4689 util.go:30] "No sandbox 
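The certificate_manager.go record above reports both the serving certificate's expiration and a "rotation deadline" that falls well before it. As a rough sketch (not the kubelet's exact code), client-go-style certificate managers place the rotation deadline at a jittered fraction of the certificate's validity window; the 70-90% range and the assumed NotBefore time below are illustrative assumptions, while the NotAfter value is taken from the log line.

// Illustrative sketch of how a jittered rotation deadline can be derived
// from a certificate's validity window. Not the actual kubelet code.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a point roughly 70-90% of the way through the
// certificate's validity window, so rotation starts well before expiry.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notBefore := time.Date(2025, time.November, 26, 5, 53, 3, 0, time.UTC) // assumed issue time
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)  // expiry from the log line
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}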
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.661945 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.662223 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.662425 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.662531 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.662641 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.662935 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.664034 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.664095 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.664117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.664144 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.664255 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
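Every "NetworkReady=false ... no CNI configuration file in /etc/kubernetes/cni/net.d/" record above reduces to a directory scan: the runtime reports the node network as not ready until at least one CNI config file appears in the conf directory. A minimal sketch of that check, assuming libcni-style extension matching (.conf, .conflist, .json); this is illustrative, not the actual cri-o/kubelet implementation.

// Minimal sketch of a CNI-config presence check, under the assumptions above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConf reports whether dir contains at least one file with a
// recognized CNI config extension.
func hasCNIConf(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConf("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// Mirrors the condition the kubelet keeps logging above.
		fmt.Println("container runtime network not ready: NetworkReady=false (no CNI config)")
		return
	}
	fmt.Println("NetworkReady=true")
}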
Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.684914 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:49Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.685094 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
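The repeated webhook failure that exhausts the retry budget is an ordinary validity-window rejection: the TLS handshake fails because the current time (2026-01-23) is after the serving certificate's NotAfter (2025-08-24). A minimal reproduction of the underlying check, assuming a hypothetical serving.crt path; in reality the error is produced inside Go's TLS stack during certificate verification.

// Sketch of the x509 validity-window check behind the logged failure.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("serving.crt") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Same condition reported in the log: current time is after NotAfter.
		fmt.Printf("x509: certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}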
event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.686784 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.686793 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.686809 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.686822 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.790100 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.790364 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.790459 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.790541 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.790623 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.893911 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.893974 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.893987 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.894007 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.894019 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.991526 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/2.log" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.995851 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.995912 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.995931 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.995956 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.995974 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:49Z","lastTransitionTime":"2026-01-23T10:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:49 crc kubenswrapper[4689]: I0123 10:49:49.996523 4689 scope.go:117] "RemoveContainer" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0" Jan 23 10:49:49 crc kubenswrapper[4689]: E0123 10:49:49.996828 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\"" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.016187 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.031247 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.051310 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.070947 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.089330 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.099092 4689 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.099189 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.099209 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.099236 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.099256 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.103818 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.121829 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.140380 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f1
1f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.162134 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.178329 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.194968 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.201333 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.201385 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.201398 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.201416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.201429 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.218676 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:
49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.244815 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.265406 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.282016 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.293903 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.304709 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.304760 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.304780 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.304805 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.304824 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.313660 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.327490 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:50Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.407522 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.407575 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.407586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.407603 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.407617 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.510619 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.510709 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.510750 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.510783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.510805 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.614947 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.615035 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.615062 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.615096 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.615119 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.639755 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:50 crc kubenswrapper[4689]: E0123 10:49:50.639985 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.661809 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 19:02:39.263947704 +0000 UTC Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.717934 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.717984 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.717993 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.718010 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.718022 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.821350 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.821394 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.821405 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.821421 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.821432 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.925362 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.925430 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.925449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.925475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:50 crc kubenswrapper[4689]: I0123 10:49:50.925494 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:50Z","lastTransitionTime":"2026-01-23T10:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.028529 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.028608 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.028626 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.028657 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.028675 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.132846 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.132913 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.132931 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.132957 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.132975 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.235819 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.235885 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.235909 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.235940 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.235965 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.338878 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.338970 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.338995 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.339035 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.339060 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.442493 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.442558 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.442585 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.442617 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.442635 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.546462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.546748 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.546849 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.546945 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.547066 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.639374 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.639408 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:51 crc kubenswrapper[4689]: E0123 10:49:51.639947 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:51 crc kubenswrapper[4689]: E0123 10:49:51.640128 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.639448 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:51 crc kubenswrapper[4689]: E0123 10:49:51.640851 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.650494 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.650567 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.650595 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.650638 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.650661 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.661966 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 20:00:03.704016007 +0000 UTC Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.754724 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.754785 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.754803 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.754826 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.754844 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.859716 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.859779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.859803 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.859835 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.859859 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.963364 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.963454 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.963523 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.963558 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:51 crc kubenswrapper[4689]: I0123 10:49:51.963580 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:51Z","lastTransitionTime":"2026-01-23T10:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.066880 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.066939 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.066957 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.066984 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.067003 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.170582 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.170623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.170639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.170670 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.170692 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.275727 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.275861 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.275887 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.275953 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.275980 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.380029 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.380108 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.380134 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.380199 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.380223 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.483312 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.483357 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.483368 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.483404 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.483417 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.585942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.585980 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.585990 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.586005 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.586018 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.639471 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:52 crc kubenswrapper[4689]: E0123 10:49:52.639649 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.662592 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 07:38:33.910677541 +0000 UTC Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.689120 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.689219 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.689243 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.689265 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.689283 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.792396 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.792466 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.792492 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.792522 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.792542 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.895336 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.895399 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.895416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.895440 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.895458 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.998423 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.998477 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.998497 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.998521 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:52 crc kubenswrapper[4689]: I0123 10:49:52.998538 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:52Z","lastTransitionTime":"2026-01-23T10:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.101659 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.101703 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.101713 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.101730 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.101743 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.204439 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.204497 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.204527 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.204554 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.204573 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.307883 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.307942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.307958 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.307980 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.307996 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.411628 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.411691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.411715 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.411744 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.411767 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.513884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.513916 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.513926 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.513941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.513952 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.616851 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.616895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.616906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.616923 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.616933 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.639820 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.639927 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.639822 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:53 crc kubenswrapper[4689]: E0123 10:49:53.639935 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:53 crc kubenswrapper[4689]: E0123 10:49:53.640086 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:53 crc kubenswrapper[4689]: E0123 10:49:53.640324 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.663572 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 17:32:38.776623605 +0000 UTC Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.720032 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.720114 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.720130 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.720181 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.720205 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.824256 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.824382 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.824408 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.824438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.824461 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.927820 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.927880 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.927898 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.927922 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:53 crc kubenswrapper[4689]: I0123 10:49:53.927943 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:53Z","lastTransitionTime":"2026-01-23T10:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.031548 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.031607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.031617 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.031630 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.031640 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.134221 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.134281 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.134290 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.134304 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.134319 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.236923 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.236994 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.237012 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.237038 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.237056 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.340495 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.340552 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.340569 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.340593 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.340610 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.444120 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.444212 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.444231 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.444256 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.444274 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.547678 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.547768 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.547786 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.547843 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.547881 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.639347 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:54 crc kubenswrapper[4689]: E0123 10:49:54.639537 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.651470 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.651571 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.651593 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.651648 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.651667 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.664575 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 04:00:49.723551576 +0000 UTC Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.754863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.754929 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.754947 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.754973 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.754995 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.859910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.859993 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.860015 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.860048 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.860068 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.963941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.964006 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.964023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.964046 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:54 crc kubenswrapper[4689]: I0123 10:49:54.964063 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:54Z","lastTransitionTime":"2026-01-23T10:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.067036 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.067104 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.067121 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.067177 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.067194 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.170890 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.170964 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.170983 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.171011 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.171031 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.273817 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.273874 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.273886 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.273904 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.274350 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.377580 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.377666 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.377676 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.377699 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.377712 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.480626 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.480700 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.480718 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.480746 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.480768 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.586014 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.586069 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.586082 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.586102 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.586115 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.639096 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.639135 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.639205 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:55 crc kubenswrapper[4689]: E0123 10:49:55.639350 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:55 crc kubenswrapper[4689]: E0123 10:49:55.639458 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:55 crc kubenswrapper[4689]: E0123 10:49:55.639599 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.664052 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\
",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.664993 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 22:51:50.150857255 +0000 UTC Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.677913 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.689080 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.689176 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.689198 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.689223 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.689240 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.702812 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87e
f826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.717412 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.745284 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.764101 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.776891 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.789052 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.792528 4689 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.792567 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.792583 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.792604 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.792619 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.810920 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.823071 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.835176 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manag
er-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.845050 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.856338 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.866384 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.878666 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.888587 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.896192 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.896237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.896250 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.896274 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.896292 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.899896 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.916896 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:55Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.999581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.999618 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.999625 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.999639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:55 crc kubenswrapper[4689]: I0123 10:49:55.999648 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:55Z","lastTransitionTime":"2026-01-23T10:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.103793 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.103865 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.103884 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.103910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.103929 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.207183 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.207251 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.207270 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.207296 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.207316 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.309850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.309938 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.309962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.309995 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.310019 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.412983 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.413022 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.413031 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.413045 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.413059 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.516344 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.516416 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.516442 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.516474 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.516496 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.619356 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.619395 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.619406 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.619421 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.619432 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.639042 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:56 crc kubenswrapper[4689]: E0123 10:49:56.639208 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.665793 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 17:38:46.403969458 +0000 UTC Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.721703 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.721746 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.721757 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.721773 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.721785 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.824957 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.825009 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.825021 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.825037 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.825049 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.928259 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.928324 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.928348 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.928376 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:56 crc kubenswrapper[4689]: I0123 10:49:56.928399 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:56Z","lastTransitionTime":"2026-01-23T10:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.030936 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.030968 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.030976 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.030990 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.030999 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.133369 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.133429 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.133445 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.133467 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.133483 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.235191 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.235251 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.235267 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.235288 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.235300 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.337420 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.337485 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.337502 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.337524 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.337542 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.440503 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.440578 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.440607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.440639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.440662 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.543728 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.543761 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.543769 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.543783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.543791 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.639370 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:57 crc kubenswrapper[4689]: E0123 10:49:57.639699 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.640260 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:57 crc kubenswrapper[4689]: E0123 10:49:57.640366 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.644386 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:57 crc kubenswrapper[4689]: E0123 10:49:57.644496 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.647852 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.647890 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.647907 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.647928 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.647945 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.666142 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 14:26:04.787503246 +0000 UTC Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.750448 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.750517 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.750535 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.750563 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.750583 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.855250 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.855338 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.855353 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.855373 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.855389 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.958836 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.958904 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.958924 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.958950 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:57 crc kubenswrapper[4689]: I0123 10:49:57.958969 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:57Z","lastTransitionTime":"2026-01-23T10:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.061401 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.061469 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.061486 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.061509 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.061527 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.165208 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.165355 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.165376 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.165436 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.165458 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.271882 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.271941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.271959 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.271985 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.272004 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.375949 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.376003 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.376015 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.376033 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.376043 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.478553 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.478626 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.478643 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.478669 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.478689 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.581766 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.581831 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.581844 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.581861 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.581875 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.639735 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:49:58 crc kubenswrapper[4689]: E0123 10:49:58.639922 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.666352 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 09:23:52.723148269 +0000 UTC Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.684618 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.684695 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.684718 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.684747 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.684770 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.787939 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.788001 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.788017 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.788040 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.788057 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.890598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.890643 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.890655 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.890671 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.890690 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.994252 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.994303 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.994314 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.994329 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:58 crc kubenswrapper[4689]: I0123 10:49:58.994340 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:58Z","lastTransitionTime":"2026-01-23T10:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.097091 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.097132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.097144 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.097181 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.097194 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.199659 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.199719 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.199739 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.199762 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.199776 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.301986 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.302032 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.302044 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.302061 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.302073 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.404594 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.404622 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.404631 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.404643 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.404651 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.506831 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.506878 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.506889 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.506905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.506916 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.609393 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.609441 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.609455 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.609471 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.609482 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.639860 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.639917 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.639882 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.640010 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.640109 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.640320 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.666659 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 11:52:37.234302523 +0000 UTC Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.712588 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.712652 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.712666 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.712687 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.712701 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.815618 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.815692 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.815702 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.815723 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.815740 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.818845 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.818905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.818920 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.818938 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.818950 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.830876 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:59Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.835391 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.835438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.835454 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.835475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.835489 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.849450 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:59Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.854385 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.854428 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.854439 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.854453 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.854466 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.867131 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:59Z is after 2025-08-24T17:21:41Z"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.870850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.870891 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.870901 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.870917 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.870928 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.883034 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:59Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.886250 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.886285 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.886294 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.886309 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.886320 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.900104 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:49:59Z is after 2025-08-24T17:21:41Z" Jan 23 10:49:59 crc kubenswrapper[4689]: E0123 10:49:59.900290 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.917840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.917892 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.917905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.917922 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:49:59 crc kubenswrapper[4689]: I0123 10:49:59.917934 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:49:59Z","lastTransitionTime":"2026-01-23T10:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.020074 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.020109 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.020117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.020132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.020142 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.122245 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.122276 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.122283 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.122296 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.122305 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.224855 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.224900 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.224910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.224925 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.224937 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.328372 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.328427 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.328440 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.328461 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.328475 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.431925 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.432190 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.432202 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.432224 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.432240 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.535237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.535307 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.535322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.535342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.535354 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.638012 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.638058 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.638071 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.638086 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.638097 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.639497 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:00 crc kubenswrapper[4689]: E0123 10:50:00.639674 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
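[editor's note] The NotReady spam above bottoms out in a simple filesystem fact: /etc/kubernetes/cni/net.d/ contains no CNI network configuration, so the runtime reports NetworkReady=false and pod syncs like the one just above are skipped. A rough Go sketch of that kind of directory probe follows; the real check lives in CRI-O's ocicni code and also parses each candidate file, so treat this as an illustration only.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains at least one CNI network
// configuration file. The extension list mirrors common CNI conventions
// (.conf, .conflist, .json); the real loader also validates file contents.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	if !ok {
		fmt.Println("no CNI configuration file found; network plugin not ready")
	}
}
```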
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.667048 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 19:16:17.698590894 +0000 UTC Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.740929 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.740986 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.740996 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.741009 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.741018 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.843248 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.843281 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.843293 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.843307 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.843317 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.952463 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.952556 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.952581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.952606 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:00 crc kubenswrapper[4689]: I0123 10:50:00.952624 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:00Z","lastTransitionTime":"2026-01-23T10:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.054492 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.054531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.054544 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.054560 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.054571 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.156699 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.156744 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.156757 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.156771 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.156782 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
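[editor's note] The "Unable to update node status" err="update node status exceeds retry count" entry further up shows what happens once the webhook has rejected the patch a few times in a row: the kubelet gives up for this sync and tries again next period. A minimal sketch of that retry budget; the count of 5 matches the upstream kubelet constant nodeStatusUpdateRetry, though that is an implementation detail that can change.

```go
package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the small per-sync retry budget the kubelet
// applies before logging "Unable to update node status".
const nodeStatusUpdateRetry = 5

func updateNodeStatus(patch func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patch(); err == nil {
			return nil
		}
		// kubelet logs "Error updating node status, will retry" here.
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// A patch that always fails, like the webhook-rejected patches in this log.
	err := updateNodeStatus(func() error {
		return errors.New("failed calling webhook: certificate has expired")
	})
	fmt.Println(err) // update node status exceeds retry count
}
```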
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.258748 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.258783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.258795 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.258810 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.258820 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.360904 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.360940 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.360952 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.360967 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.360978 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.463894 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.463995 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.464017 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.464043 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.464060 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
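[editor's note] Further down, the terminated kube-multus container's message shows why it exited 1: its readiness-indicator check polled for /host/run/multus/cni/net.d/10-ovn-kubernetes.conf and gave up with "pollimmediate error: timed out waiting for the condition". A minimal sketch of such a poll-until-file-exists loop, loosely mirroring wait.PollImmediate semantics from k8s.io/apimachinery; the interval and timeout values here are illustrative, not multus's actual configuration.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

// waitForFile polls for path every interval until timeout, checking once
// immediately. It returns the same flavor of error multus reports:
// "timed out waiting for the condition".
func waitForFile(path string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	// Path taken from the multus termination message below; values illustrative.
	err := waitForFile("/host/run/multus/cni/net.d/10-ovn-kubernetes.conf",
		time.Second, 45*time.Second)
	fmt.Println(err)
}
```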
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.566298 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.566333 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.566341 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.566355 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.566363 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.639060 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.639089 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.639188 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:01 crc kubenswrapper[4689]: E0123 10:50:01.639284 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:50:01 crc kubenswrapper[4689]: E0123 10:50:01.639547 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:50:01 crc kubenswrapper[4689]: E0123 10:50:01.639692 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
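[editor's note] The certificate_manager.go:356 line just below reports a rotation deadline of 2025-12-10, while the one logged a second earlier said 2025-11-27, for the same certificate: the deadline is re-jittered on every evaluation, and since both fall in the past relative to the node clock, the kubelet keeps retrying rotation. Upstream, client-go's certificate manager picks the deadline uniformly between 70% and 90% of the validity window; a sketch of that computation, where the NotBefore date is an assumption since the log only shows the expiration.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline sketches the jitter used by k8s.io/client-go's
// certificate manager: pick a point 70-90% of the way through the
// certificate's validity window, so a fleet of kubelets doesn't rotate in
// lockstep. Re-running it yields a different deadline each time, which is
// why consecutive certificate_manager.go entries in this log disagree.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Window approximated from the log; the true NotBefore is not shown
	// there, so this start date is an assumption.
	notBefore := time.Date(2025, time.August, 24, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	fmt.Println("rotation deadline:", nextRotationDeadline(notBefore, notAfter))
}
```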
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.667384 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 14:07:24.241333746 +0000 UTC Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.669377 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.669438 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.669452 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.669475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.669496 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.772533 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.772586 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.772598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.772614 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.772626 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.875538 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.875604 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.875617 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.875639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.875653 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.926607 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:01 crc kubenswrapper[4689]: E0123 10:50:01.926985 4689 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 23 10:50:01 crc kubenswrapper[4689]: E0123 10:50:01.927093 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs podName:01ee7060-466f-4294-934f-3df3b9aa7afe nodeName:}" failed. No retries permitted until 2026-01-23 10:50:33.927070011 +0000 UTC m=+98.551749970 (durationBeforeRetry 32s).
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs") pod "network-metrics-daemon-cpc6c" (UID: "01ee7060-466f-4294-934f-3df3b9aa7afe") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.978446 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.978487 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.978498 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.978521 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:01 crc kubenswrapper[4689]: I0123 10:50:01.978534 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:01Z","lastTransitionTime":"2026-01-23T10:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.037931 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-xxklh_d5f32f36-d66c-4202-ac54-e81c6d978146/kube-multus/0.log" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.038008 4689 generic.go:334] "Generic (PLEG): container finished" podID="d5f32f36-d66c-4202-ac54-e81c6d978146" containerID="66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f" exitCode=1 Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.038052 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerDied","Data":"66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.038661 4689 scope.go:117] "RemoveContainer" containerID="66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.058586 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.072696 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:50:01Z\\\",\\\"message\\\":\\\"2026-01-23T10:49:16+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd\\\\n2026-01-23T10:49:16+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd to /host/opt/cni/bin/\\\\n2026-01-23T10:49:16Z [verbose] multus-daemon started\\\\n2026-01-23T10:49:16Z [verbose] Readiness Indicator file check\\\\n2026-01-23T10:50:01Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.080901 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.080928 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.080938 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.080958 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.080970 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.090543 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01
-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bd
bc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.102697 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.122768 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics 
server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servicea
ccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.135041 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.157424 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.172463 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.183451 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.183479 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.183488 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.183502 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.183510 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.186001 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.197751 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba9
6e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.208788 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\
\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.221451 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.234704 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c
94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.246220 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.260747 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.274513 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289679 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289732 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289749 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289773 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289790 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.289749 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.302928 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c
083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:02Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.392592 4689 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.392626 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.392634 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.392648 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.392657 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.495368 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.495445 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.495457 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.495475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.495488 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.597711 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.597774 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.597794 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.597819 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.597839 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.639184 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:50:02 crc kubenswrapper[4689]: E0123 10:50:02.639324 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.668249 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 09:28:21.232905167 +0000 UTC Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.700178 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.700211 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.700223 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.700237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.700247 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.802968 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.803009 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.803024 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.803040 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.803051 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.905801 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.905867 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.905881 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.905898 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:02 crc kubenswrapper[4689]: I0123 10:50:02.905911 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:02Z","lastTransitionTime":"2026-01-23T10:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.008503 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.008563 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.008581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.008603 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.008619 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.043342 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-xxklh_d5f32f36-d66c-4202-ac54-e81c6d978146/kube-multus/0.log" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.043404 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerStarted","Data":"d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.059029 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.078314 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.091660 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.102777 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:50:01Z\\\",\\\"message\\\":\\\"2026-01-23T10:49:16+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd\\\\n2026-01-23T10:49:16+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd to /host/opt/cni/bin/\\\\n2026-01-23T10:49:16Z [verbose] multus-daemon started\\\\n2026-01-23T10:49:16Z [verbose] Readiness Indicator file check\\\\n2026-01-23T10:50:01Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:50:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.111033 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.111068 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.111077 4689 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.111090 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.111099 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.121417 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:
49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de
0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],
\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.133611 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.151590 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.164436 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.176746 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.187189 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.206576 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.214173 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.214223 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.214235 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.214254 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.214267 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.218630 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.230764 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.243462 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.256062 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.267969 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.277654 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.286129 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:03Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.317804 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.317853 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.317866 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.317886 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.317900 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.420611 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.420662 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.420675 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.420693 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.420705 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.523224 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.523260 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.523271 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.523286 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.523296 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.626507 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.626550 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.626560 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.626577 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.626589 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.639822 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.639867 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:50:03 crc kubenswrapper[4689]: E0123 10:50:03.640002 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.640046 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:03 crc kubenswrapper[4689]: E0123 10:50:03.640204 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:03 crc kubenswrapper[4689]: E0123 10:50:03.640360 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.668503 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 12:58:21.420596309 +0000 UTC Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.734346 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.734393 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.734420 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.734459 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.734478 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.837941 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.838010 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.838034 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.838063 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.838085 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.941568 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.941614 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.941629 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.941649 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:03 crc kubenswrapper[4689]: I0123 10:50:03.941665 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:03Z","lastTransitionTime":"2026-01-23T10:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.044216 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.044263 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.044275 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.044293 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.044304 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.146425 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.146458 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.146469 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.146486 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.146498 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.249297 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.249329 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.249339 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.249353 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.249363 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.351060 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.351089 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.351097 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.351112 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.351123 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.453476 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.453506 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.453517 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.453531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.453542 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.555285 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.555330 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.555341 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.555357 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.555368 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.639329 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:50:04 crc kubenswrapper[4689]: E0123 10:50:04.639611 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.639806 4689 scope.go:117] "RemoveContainer" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0" Jan 23 10:50:04 crc kubenswrapper[4689]: E0123 10:50:04.639961 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\"" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.657979 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.658012 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.658023 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.658039 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.658051 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.668683 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 20:12:44.821505421 +0000 UTC Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.760457 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.760492 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.760500 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.760512 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.760521 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.862604 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.862641 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.862648 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.862660 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.862669 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.964759 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.964790 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.964797 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.964809 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:04 crc kubenswrapper[4689]: I0123 10:50:04.964818 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:04Z","lastTransitionTime":"2026-01-23T10:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.067346 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.067421 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.067447 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.067478 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.067497 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.169568 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.169610 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.169622 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.169638 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.169649 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.271718 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.271755 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.271767 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.271782 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.271793 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.375637 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.375679 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.375689 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.375703 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.375712 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.478504 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.478546 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.478555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.478571 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.478581 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.580811 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.580856 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.580867 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.580883 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.580895 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.639925 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.639976 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.639926 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:05 crc kubenswrapper[4689]: E0123 10:50:05.640026 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:05 crc kubenswrapper[4689]: E0123 10:50:05.640066 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:05 crc kubenswrapper[4689]: E0123 10:50:05.640129 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.653840 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rcnzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"261874ce-6002-4c08-b8f4-d507aae12d40\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://413cd6202820c706706884357eb1210dae1abed04832f2720e46ca62c7f44e1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjrzx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rcnzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.669761 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 20:46:00.757150276 +0000 UTC Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.684017 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.684054 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.684065 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.684081 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.684093 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.689552 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5206d70b-3d3b-404c-b969-713242a23d38\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87e
f826df90dc9ea45c6dddeda0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:49:48Z\\\",\\\"message\\\":\\\"dler 7 for removal\\\\nI0123 10:49:48.520873 6382 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 10:49:48.520918 6382 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.520959 6382 handler.go:208] Removed *v1.Node event handler 7\\\\nI0123 10:49:48.521007 6382 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0123 10:49:48.521079 6382 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 10:49:48.521235 6382 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 10:49:48.521296 6382 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 10:49:48.521424 6382 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 10:49:48.521510 6382 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 10:49:48.521586 6382 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 10:49:48.521665 6382 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 10:49:48.521727 6382 factory.go:656] Stopping watch factory\\\\nI0123 10:49:48.521801 6382 ovnkube.go:599] Stopped ovnkube\\\\nI0123 10:49:48.521741 6382 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 10:49:48.521882 6382 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 10:49:48.522220 6382 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-jpm9c_openshift-ovn-kubernetes(5206d70b-3d3b-404c-b969-713242a23d38)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qkjb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-jpm9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.706069 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01ee7060-466f-4294-934f-3df3b9aa7afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z7zhk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cpc6c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.732064 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46dd8a0e-3818-4644-b7bd-62ba4db19ad3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e85c7a1d4d05411e46b45d94b925f8d0e4b88cdcc5b0a3e1b880616e62825ad0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf9da168c5fdf87b28cb14d4b7b7f76c3d83bf41ac0b7fbbcdb33525a625b8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://715613f84de8bf24c0ed19cd39a09872b6111cb8ee1bc80840ec0273f318b66f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://47d31204af0529ad0c0ab7241dcfe988c0f784e
95f44a6c8e0add81091eacc61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e758cbca5193384b23c616dcce3fdf0aef7cc0e728b2a568b6881217e9108b8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cbdfdf57a1a701ce08ee3f2676522156faa9e174640e553bb464bc08aa22957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87bc1c156ec96c0fcec8777e1a1a96e9d6c216d160e5a461f45e5c54628610d6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9927462988a277bf7bf72a62f58610f6a500b77119bc726c09820a551cd16a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.746499 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f86e317a9ed04dfc8b1528490258160a6d27098c21285cd6d1742d73114cd073\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.761194 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c258504a27f4a2ec4af620d2367378b4af1f330c84414f9551d6e7b818a83998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fda648f164980b610acb3321e8406c04e436bcea143f96fe2d5700e1dfd1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.773586 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.786924 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.786965 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.786977 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.786993 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.787005 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.787827 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.802265 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.818515 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560
dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.830853 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.845048 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.856385 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.867811 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"477277a2-9597-4370-bc18-4c876f7898f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8908b88160ba12cd60fcfc6a81c83dea15df25bdd79bce79fdbfe65e8f24268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48
:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7645a05e37922baeda0aea3212c3edeec9974f6ef0e5b6c2a3d1653a8e28fd16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac55b1c96873cf26d0a234c7149e23261357859fd8390b68231cc1bf0d73b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.883647 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b506d5b9-724d-425b-bb31-d21fb6b92080\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T10:49:14Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 10:49:08.013654 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 10:49:08.015071 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1428804130/tls.crt::/tmp/serving-cert-1428804130/tls.key\\\\\\\"\\\\nI0123 10:49:14.155597 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 10:49:14.158604 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 10:49:14.158627 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 10:49:14.158648 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 10:49:14.158656 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 10:49:14.164204 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0123 10:49:14.164214 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0123 10:49:14.164234 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164241 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 10:49:14.164246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 10:49:14.164251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 10:49:14.164255 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 10:49:14.164259 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0123 10:49:14.166232 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:48:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.889096 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.889135 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.889157 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.889173 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.889182 4689 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.895046 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.911620 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-xxklh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5f32f36-d66c-4202-ac54-e81c6d978146\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T10:50:01Z\\\",\\\"message\\\":\\\"2026-01-23T10:49:16+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd\\\\n2026-01-23T10:49:16+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c3b16294-9d08-4fc0-bf01-da2c1c448dbd to /host/opt/cni/bin/\\\\n2026-01-23T10:49:16Z [verbose] multus-daemon started\\\\n2026-01-23T10:49:16Z [verbose] Readiness Indicator file check\\\\n2026-01-23T10:50:01Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:50:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9dgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-xxklh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.927583 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-llpck" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4127aca0-3ce5-49a4-87e9-34f927efd502\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://532f65cec7216f8df26a468a42d873012eb4dcb2fc99f0ba1aa3b1663cb33ea0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b5115c39ef9b0b4a122d2b483e6edf582eceda01adbe9af164946f3d7a2a641\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2370c520ed926f9aa0e3103ffc4f2bced3b4e57862b92dec35f2264e9ed184ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a0b68e945933658770e07e28fdfbd1e6364b85f561441a5e3bd457419ed4392d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3696756a7c5c5f871bada38cbb00f509feacc17d5c9c916c8a2295925701f9ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60019f9b70f25f441255e5017e5641285c0ece18fdddc489892b9a36f83fb8e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bf51e4505eb867930d5474952e65b6103c35357b7182b17902e9396dbbf76dd0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T10:49:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T10:49:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4776\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-llpck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:05Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.992181 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.992220 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:05 crc 
kubenswrapper[4689]: I0123 10:50:05.992228 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.992242 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:05 crc kubenswrapper[4689]: I0123 10:50:05.992251 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:05Z","lastTransitionTime":"2026-01-23T10:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.095026 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.095076 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.095114 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.095132 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.095143 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.197239 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.197284 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.197295 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.197308 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.197319 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.299516 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.299553 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.299564 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.299579 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.299591 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.401594 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.401653 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.401676 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.401707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.401730 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.505139 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.505230 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.505242 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.505261 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.505274 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.606856 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.606885 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.606894 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.606906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.606915 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.639857 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:06 crc kubenswrapper[4689]: E0123 10:50:06.639964 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.670305 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 06:10:10.841316295 +0000 UTC
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.709742 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.709787 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.709800 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.709816 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.709829 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.812631 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.812665 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.812674 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.812689 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.812699 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.914398 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.914456 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.914477 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.914497 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:06 crc kubenswrapper[4689]: I0123 10:50:06.914510 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:06Z","lastTransitionTime":"2026-01-23T10:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.018055 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.018118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.018129 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.018180 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.018196 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.120237 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.120269 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.120277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.120290 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.120299 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.222288 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.222329 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.222342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.222362 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.222375 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.324610 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.324681 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.324701 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.324729 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.324760 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.427902 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.427942 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.427953 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.427966 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.427975 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.530284 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.530627 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.530863 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.531105 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.531377 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.634277 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.634344 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.634365 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.634393 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.634415 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.639528 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:07 crc kubenswrapper[4689]: E0123 10:50:07.639710 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.640007 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:07 crc kubenswrapper[4689]: E0123 10:50:07.640382 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.640039 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:07 crc kubenswrapper[4689]: E0123 10:50:07.641116 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.671448 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 13:14:49.97411989 +0000 UTC
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.737529 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.737578 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.737598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.737625 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.737644 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.840327 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.840387 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.840404 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.840432 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.840451 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.942879 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.943666 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.943796 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.943963 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:07 crc kubenswrapper[4689]: I0123 10:50:07.944079 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:07Z","lastTransitionTime":"2026-01-23T10:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.047581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.047627 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.047639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.047658 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.047669 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.157775 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.157845 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.157858 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.157875 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.157886 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.261514 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.261564 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.261577 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.261594 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.261605 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.364111 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.364171 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.364183 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.364200 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.364213 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.467517 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.467583 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.467607 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.467636 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.467659 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.569774 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.569811 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.569824 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.569840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.569851 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.639749 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:08 crc kubenswrapper[4689]: E0123 10:50:08.639909 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.671857 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 10:45:03.090608025 +0000 UTC
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.672753 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.672829 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.672853 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.672880 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.672892 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.776187 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.776247 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.776261 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.776280 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.776300 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.879515 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.879556 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.879567 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.879582 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.879593 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.982997 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.983070 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.983089 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.983111 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:08 crc kubenswrapper[4689]: I0123 10:50:08.983128 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:08Z","lastTransitionTime":"2026-01-23T10:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.085859 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.085914 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.085927 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.085945 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.085958 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.189285 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.189352 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.189381 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.189477 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.189508 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.291721 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.291784 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.291806 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.291833 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.291855 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.394511 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.394550 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.394559 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.394574 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.394584 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.496768 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.496832 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.496849 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.496871 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.496888 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.599252 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.599289 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.599302 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.599319 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.599329 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.639224 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.639278 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.639377 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:09 crc kubenswrapper[4689]: E0123 10:50:09.639544 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:09 crc kubenswrapper[4689]: E0123 10:50:09.639746 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:09 crc kubenswrapper[4689]: E0123 10:50:09.639678 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.651790 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.672294 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 05:33:09.279797959 +0000 UTC Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.702337 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.702366 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.702376 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.702389 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.702400 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.805962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.806126 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.806198 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.806229 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.806298 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.909444 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.909529 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.909552 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.909582 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:09 crc kubenswrapper[4689]: I0123 10:50:09.909602 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:09Z","lastTransitionTime":"2026-01-23T10:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.013390 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.013478 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.013498 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.013522 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.013540 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.031342 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.031418 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.031440 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.031462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.031482 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.057557 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:10Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.062454 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.062529 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.062547 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.063030 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.063100 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.078282 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:10Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.083381 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.083453 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.083490 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.083514 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.083532 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.096551 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:10Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.100798 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.100860 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.100882 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.100906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.100922 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.115515 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:10Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.120286 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.120339 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.120357 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.120380 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.120397 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.136671 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T10:50:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bd6f1f67-2691-4f88-98e8-da7f80565717\\\",\\\"systemUUID\\\":\\\"236a3513-f97a-4901-87ca-fa776d1157c7\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:10Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.136986 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.139026 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.139109 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.139129 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.139240 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.139270 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.243643 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.243722 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.243745 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.243774 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.243791 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.347610 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.347716 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.347773 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.347799 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.347852 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.452536 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.452625 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.452642 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.452665 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.452707 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.556036 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.556113 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.556130 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.556192 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.556212 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.639258 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:10 crc kubenswrapper[4689]: E0123 10:50:10.639398 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.659803 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.659867 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.659883 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.659905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.659921 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.673370 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 07:32:08.678693435 +0000 UTC Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.762691 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.762767 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.762792 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.762821 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.762842 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.866130 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.866234 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.866259 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.866288 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.866312 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.969443 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.969511 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.969531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.969555 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:10 crc kubenswrapper[4689]: I0123 10:50:10.969573 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:10Z","lastTransitionTime":"2026-01-23T10:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.072462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.072543 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.072573 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.072599 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.072619 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.175523 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.175598 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.175622 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.175653 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.175673 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.278992 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.279098 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.279118 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.279141 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.279203 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.382737 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.382802 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.382823 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.382850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.382875 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
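The patch bodies in the failed update attempts above follow Kubernetes strategic-merge-patch conventions: the $setElementOrder/conditions directive fixes the ordering of the conditions array, and each listed condition is merged into the existing list by its "type" key rather than replacing the whole array. A small sketch of how a payload of that shape can be assembled; the local condition struct is illustrative, not the kubelet's own NodeCondition type.

package main

import (
	"encoding/json"
	"fmt"
)

// condition mirrors only the fields visible in the failing patch; it is a
// stand-in for the real NodeCondition type, defined here for illustration.
type condition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime,omitempty"`
	LastTransitionTime string `json:"lastTransitionTime,omitempty"`
	Reason             string `json:"reason,omitempty"`
	Message            string `json:"message,omitempty"`
}

func main() {
	patch := map[string]any{
		"status": map[string]any{
			// Strategic merge patch directive: keep the merged list in this order.
			"$setElementOrder/conditions": []map[string]string{
				{"type": "MemoryPressure"}, {"type": "DiskPressure"},
				{"type": "PIDPressure"}, {"type": "Ready"},
			},
			// Each element is merged by its "type" key, so only the listed
			// conditions are updated; unlisted array entries are preserved.
			"conditions": []condition{{
				Type: "Ready", Status: "False", Reason: "KubeletNotReady",
				Message: "container runtime network not ready",
			}},
		},
	}
	out, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}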
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.485739 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.485780 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.485791 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.485805 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.485817 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.589009 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.589329 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.589352 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.589379 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.589399 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.638955 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.638977 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:11 crc kubenswrapper[4689]: E0123 10:50:11.639091 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.639132 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:11 crc kubenswrapper[4689]: E0123 10:50:11.639282 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:50:11 crc kubenswrapper[4689]: E0123 10:50:11.639394 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.674341 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 07:02:28.961332591 +0000 UTC
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.691825 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.691908 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.691929 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.691962 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.691986 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
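The reason string on every NotReady heartbeat and every "Error syncing pod" entry above is the runtime's network readiness check: no CNI configuration file under /etc/kubernetes/cni/net.d/. Below is a sketch of the equivalent check run from the node; the path comes from the log message, while the extensions filtered (.conf, .conflist, .json) are the conventional CNI config suffixes and are an assumption, not lifted from this log.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the kubelet log
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("read dir:", err)
		return
	}
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // conventional CNI config extensions (assumption)
			fmt.Println("CNI config:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file found; network plugin not ready")
	}
}

Until the network provider (here, presumably OVN-Kubernetes on an OpenShift CRC node) writes a config file into that directory, the runtime keeps reporting NetworkReady=false and the kubelet keeps the Ready condition False, which is why the same message repeats on every heartbeat below.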
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.795017 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.795077 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.795093 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.795117 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.795134 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.897596 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.897663 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.897685 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.897714 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:11 crc kubenswrapper[4689]: I0123 10:50:11.897738 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:11Z","lastTransitionTime":"2026-01-23T10:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.000426 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.000537 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.000559 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.000583 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.000601 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:12Z","lastTransitionTime":"2026-01-23T10:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.103449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.103548 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.103573 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.103595 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.103612 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:12Z","lastTransitionTime":"2026-01-23T10:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.639895 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:12 crc kubenswrapper[4689]: E0123 10:50:12.640073 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:12 crc kubenswrapper[4689]: I0123 10:50:12.675410 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 16:44:28.996888382 +0000 UTC
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.037485 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.037532 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.037547 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.037565 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.037579 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:13Z","lastTransitionTime":"2026-01-23T10:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.638945 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.639065 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:13 crc kubenswrapper[4689]: E0123 10:50:13.639246 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.639292 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:13 crc kubenswrapper[4689]: E0123 10:50:13.639517 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:50:13 crc kubenswrapper[4689]: E0123 10:50:13.639643 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:50:13 crc kubenswrapper[4689]: I0123 10:50:13.675891 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 23:48:40.66420616 +0000 UTC
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.070287 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.070332 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.070343 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.070358 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.070366 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:14Z","lastTransitionTime":"2026-01-23T10:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.639784 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:14 crc kubenswrapper[4689]: E0123 10:50:14.640017 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:14 crc kubenswrapper[4689]: I0123 10:50:14.676496 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 10:04:59.279703639 +0000 UTC
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.101653 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.101730 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.101753 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.101783 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.101808 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:15Z","lastTransitionTime":"2026-01-23T10:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.639680 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:15 crc kubenswrapper[4689]: E0123 10:50:15.639896 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.639700 4689 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:15 crc kubenswrapper[4689]: E0123 10:50:15.640063 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.640126 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:15 crc kubenswrapper[4689]: E0123 10:50:15.640364 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.659238 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d8de6cc-a03d-468b-bfe9-fbf544087653\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5dc38447e00e88741ca363ab5d50dd76ed3202e65e2d6b2d79e308e18713b89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c
915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7c5p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:15Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-sp7sf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.677243 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 03:34:32.293819567 +0000 UTC Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.680205 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rmzx9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5da240e2-abc7-443a-89d5-65297d2a14ea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5619176bbb15cdbc580b3efc3bdd64c48fab6cae6badec815d746ea81e3e2f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k4lg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:17Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rmzx9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z" Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.699364 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67c055d4-ebe7-45e0-b8fa-b597e3c7350e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ab0755451f8bceba930a504c3d43824106473868f5f9d0e146088000d012ca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3130df02e050241b109ccac13d1fd86ad7f25305454eb2e4153613b05c7530d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2wb44\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:49:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvgzn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z" Jan 23 
10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.719978 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e093802e-6098-4478-934b-751fc2816a06\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T10:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e745aaca6a4e553c8d560dc0fb035fdf5ed97c2dfc93732d5bf2921994f4c94\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee63fd67ed6fdc75752ee12d819951282b00fd1bf7ecb9cbdb7fa4376e8557ca\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:48:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T10:48:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.721408 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.721479 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.721497 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.721526 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.721545 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:15Z","lastTransitionTime":"2026-01-23T10:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.743731 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c49ccf579e14889b95797c2468fc37bb94cf348baed627d14843736a145dd8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T10:49:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.765817 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.791301 4689 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T10:49:13Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T10:50:15Z is after 2025-08-24T17:21:41Z"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.825692 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.825742 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.825761 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.825784 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.825802 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:15Z","lastTransitionTime":"2026-01-23T10:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.842381 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=61.842355115 podStartE2EDuration="1m1.842355115s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.842266213 +0000 UTC m=+80.466946082" watchObservedRunningTime="2026-01-23 10:50:15.842355115 +0000 UTC m=+80.467035024"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.842962 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=33.84294512 podStartE2EDuration="33.84294512s" podCreationTimestamp="2026-01-23 10:49:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.823384488 +0000 UTC m=+80.448064357" watchObservedRunningTime="2026-01-23 10:50:15.84294512 +0000 UTC m=+80.467625029"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.875323 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-xxklh" podStartSLOduration=61.87530178 podStartE2EDuration="1m1.87530178s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.875194567 +0000 UTC m=+80.499874456" watchObservedRunningTime="2026-01-23 10:50:15.87530178 +0000 UTC m=+80.499981639"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.898571 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-llpck" podStartSLOduration=61.898531726 podStartE2EDuration="1m1.898531726s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.897786716 +0000 UTC m=+80.522466585" watchObservedRunningTime="2026-01-23 10:50:15.898531726 +0000 UTC m=+80.523211595"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.914584 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-rcnzm" podStartSLOduration=61.914569397 podStartE2EDuration="1m1.914569397s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.914428593 +0000 UTC m=+80.539108472" watchObservedRunningTime="2026-01-23 10:50:15.914569397 +0000 UTC m=+80.539249256"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.928097 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.928138 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.928165 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.928184 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.928196 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:15Z","lastTransitionTime":"2026-01-23T10:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.965219 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=6.965199335 podStartE2EDuration="6.965199335s" podCreationTimestamp="2026-01-23 10:50:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.964440986 +0000 UTC m=+80.589120855" watchObservedRunningTime="2026-01-23 10:50:15.965199335 +0000 UTC m=+80.589879194"
Jan 23 10:50:15 crc kubenswrapper[4689]: I0123 10:50:15.987856 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=60.987835486 podStartE2EDuration="1m0.987835486s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:15.986888782 +0000 UTC m=+80.611568641" watchObservedRunningTime="2026-01-23 10:50:15.987835486 +0000 UTC m=+80.612515345"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.029855 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.029895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.029906 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.029927 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.029939 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.132644 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.132707 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.132716 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.132732 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.132744 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.236491 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.236572 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.236596 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.236625 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.236645 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.339818 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.339880 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.339897 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.339920 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.339937 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.442317 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.442393 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.442420 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.442449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.442507 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.545757 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.545850 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.545876 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.545905 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.545927 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.638925 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:16 crc kubenswrapper[4689]: E0123 10:50:16.639142 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.649381 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.649448 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.649464 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.649486 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.649504 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.677850 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 03:21:19.765515951 +0000 UTC Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.752513 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.752556 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.752567 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.752583 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.752595 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.855337 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.855411 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.855428 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.855454 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.855471 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.958823 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.958893 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.958911 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.958937 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:16 crc kubenswrapper[4689]: I0123 10:50:16.958956 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:16Z","lastTransitionTime":"2026-01-23T10:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.061756 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.061820 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.061837 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.061862 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.061884 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.165494 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.165576 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.165601 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.165633 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.165658 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.268361 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.268428 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.268446 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.268475 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.268495 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.371651 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.371696 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.371708 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.371726 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.371738 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.475232 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.475288 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.475300 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.475317 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.475329 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.578452 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.578533 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.578558 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.578588 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.578612 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.639783 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:50:17 crc kubenswrapper[4689]: E0123 10:50:17.639936 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.640038 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.640357 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:17 crc kubenswrapper[4689]: E0123 10:50:17.640502 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:17 crc kubenswrapper[4689]: E0123 10:50:17.640681 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.678553 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 04:25:42.429751388 +0000 UTC Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.681838 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.681893 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.681916 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.681944 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.681965 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.784978 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.785036 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.785072 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.785109 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.785133 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.889719 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.889779 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.889797 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.889821 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.889842 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.912318 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:17 crc kubenswrapper[4689]: E0123 10:50:17.912528 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:51:21.912501366 +0000 UTC m=+146.537181265 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.992813 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.992888 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.992912 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.992939 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:17 crc kubenswrapper[4689]: I0123 10:50:17.992956 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:17Z","lastTransitionTime":"2026-01-23T10:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.013537 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.013604 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.013656 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.013688 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.013802 4689 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.013865 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:51:22.013844116 +0000 UTC m=+146.638524005 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014142 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014222 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014245 4689 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014297 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 10:51:22.014281847 +0000 UTC m=+146.638961746 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014388 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014425 4689 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014444 4689 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014567 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 10:51:22.014540533 +0000 UTC m=+146.639220422 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014586 4689 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.014738 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 10:51:22.014695987 +0000 UTC m=+146.639375896 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.095322 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.095395 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.095433 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.095462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.095484 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.198584 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.198649 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.198671 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.198700 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.198724 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.302203 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.302249 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.302267 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.302290 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.302310 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.405449 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.405518 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.405541 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.405569 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.405592 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.508380 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.508417 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.508429 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.508447 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.508461 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.611242 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.611312 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.611334 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.611363 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.611403 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.638946 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:18 crc kubenswrapper[4689]: E0123 10:50:18.640202 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.640627 4689 scope.go:117] "RemoveContainer" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.678939 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 16:07:37.218645343 +0000 UTC
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.719830 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.719895 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.719916 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.719943 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.719965 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.823843 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.824291 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.824319 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.824350 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.824375 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.927804 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.927838 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.927853 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.927873 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:18 crc kubenswrapper[4689]: I0123 10:50:18.927888 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:18Z","lastTransitionTime":"2026-01-23T10:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.030574 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.030614 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.030628 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.030647 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.030661 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
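The NotReady condition above repeats because the kubelet's network-readiness check finds no CNI configuration on disk. A minimal Go sketch of that style of check, assuming (as libcni does) that any *.conf, *.conflist, or *.json file under the conf dir counts as configuration; this is an illustration, not the kubelet's actual code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigured reports whether the CNI conf directory contains at least
// one config file; until it does, a kubelet-style readiness check keeps
// returning "NetworkPluginNotReady".
func cniConfigured(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigured("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("container runtime network not ready: no CNI configuration file")
		return
	}
	fmt.Println("NetworkReady=true")
}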
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.101555 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/2.log"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.111300 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerStarted","Data":"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.111897 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.133225 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.133283 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.133302 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.133324 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.133342 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.148184 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podStartSLOduration=64.148169437 podStartE2EDuration="1m4.148169437s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:19.145206101 +0000 UTC m=+83.769885960" watchObservedRunningTime="2026-01-23 10:50:19.148169437 +0000 UTC m=+83.772849296"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.177631 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-rmzx9" podStartSLOduration=65.177612882 podStartE2EDuration="1m5.177612882s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:19.1603898 +0000 UTC m=+83.785069659" watchObservedRunningTime="2026-01-23 10:50:19.177612882 +0000 UTC m=+83.802292741"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.219556 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=64.219539987 podStartE2EDuration="1m4.219539987s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:19.21926201 +0000 UTC m=+83.843941869" watchObservedRunningTime="2026-01-23 10:50:19.219539987 +0000 UTC m=+83.844219846"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.219932 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvgzn" podStartSLOduration=64.219924797 podStartE2EDuration="1m4.219924797s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:19.180894156 +0000 UTC m=+83.805574015" watchObservedRunningTime="2026-01-23 10:50:19.219924797 +0000 UTC m=+83.844604676"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.234855 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.234898 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.234910 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.234925 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.234937 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.269995 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podStartSLOduration=65.269969291 podStartE2EDuration="1m5.269969291s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:19.268442392 +0000 UTC m=+83.893122271" watchObservedRunningTime="2026-01-23 10:50:19.269969291 +0000 UTC m=+83.894649190"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.337388 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.337434 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.337446 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.337462 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.337476 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.440520 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.440581 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.440600 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.440626 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.440644 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
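In the pod_startup_latency_tracker entries above, podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp (64.148169437s = 10:50:19.148169437 - 10:49:15 for ovnkube-node-jpm9c, with no image pulls to subtract). A small Go check using the two timestamps exactly as the log prints them; the parse layout matches Go's default time.Time formatting:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the ovnkube-node-jpm9c entry above.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-23 10:49:15 +0000 UTC")
	watched, _ := time.Parse(layout, "2026-01-23 10:50:19.148169437 +0000 UTC")
	// Prints 1m4.148169437s, i.e. podStartSLOduration=64.148169437.
	fmt.Println(watched.Sub(created))
}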
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.543568 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.543613 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.543623 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.543639 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.543652 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.605237 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cpc6c"]
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.605451 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:19 crc kubenswrapper[4689]: E0123 10:50:19.605645 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.639059 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.639140 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:19 crc kubenswrapper[4689]: E0123 10:50:19.639286 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 10:50:19 crc kubenswrapper[4689]: E0123 10:50:19.639431 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.645637 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.645729 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.645750 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.645773 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.645790 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.679466 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 19:57:43.995723919 +0000 UTC
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.748610 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.748670 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.748688 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.748711 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.748728 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.851566 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.851634 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.851652 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.851675 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.851694 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.955758 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.955822 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.955840 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.955864 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:19 crc kubenswrapper[4689]: I0123 10:50:19.955881 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:19Z","lastTransitionTime":"2026-01-23T10:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.058457 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.058807 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.058819 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.058835 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.058846 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.160669 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.160708 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.160718 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.160733 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.160744 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.263292 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.263344 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.263360 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.263377 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.263389 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.365531 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.365646 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.365657 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.365672 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.365681 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.468612 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.468666 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.468678 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.468697 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.468711 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.476079 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.476131 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.476200 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.476236 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.476257 4689 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T10:50:20Z","lastTransitionTime":"2026-01-23T10:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.542017 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"]
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.542600 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.545641 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.545824 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.546101 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.546375 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.639601 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 10:50:20 crc kubenswrapper[4689]: E0123 10:50:20.639903 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.643229 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.643386 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.643472 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.643536 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.643576 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.679808 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 23:47:28.755105155 +0000 UTC
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.679881 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.692245 4689 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744091 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744219 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744313 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744328 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744486 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.744412 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.746313 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.754445 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.772627 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-snbsh\" (UID: \"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: I0123 10:50:20.867661 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh"
Jan 23 10:50:20 crc kubenswrapper[4689]: W0123 10:50:20.894808 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod903aa9f8_b069_4e95_9cbd_0a4bfdb8fc6b.slice/crio-bb985b2bab21dd3b7ca3ef57bb24b9b71c5fe8e3050e8b3ec851a89af72ccc20 WatchSource:0}: Error finding container bb985b2bab21dd3b7ca3ef57bb24b9b71c5fe8e3050e8b3ec851a89af72ccc20: Status 404 returned error can't find the container with id bb985b2bab21dd3b7ca3ef57bb24b9b71c5fe8e3050e8b3ec851a89af72ccc20
Jan 23 10:50:21 crc kubenswrapper[4689]: I0123 10:50:21.119728 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh" event={"ID":"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b","Type":"ContainerStarted","Data":"bb985b2bab21dd3b7ca3ef57bb24b9b71c5fe8e3050e8b3ec851a89af72ccc20"}
Jan 23 10:50:21 crc kubenswrapper[4689]: I0123 10:50:21.639865 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:50:21 crc kubenswrapper[4689]: I0123 10:50:21.639935 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 10:50:21 crc kubenswrapper[4689]: E0123 10:50:21.640097 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
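The reconciler_common/operation_generator entries above follow the volume manager's pattern: each volume in the pod's desired state is verified as attached, then mounted, producing a started/succeeded pair per volume. A schematic sketch of that desired-vs-actual reconcile shape, using hypothetical types (the kubelet's real implementation lives in its volumemanager package):

package main

import "fmt"

type volume struct{ name, kind string }

// reconcile mounts every desired volume that is not yet in the actual
// state, mirroring the started/succeeded pairs in the log above.
func reconcile(desired []volume, actual map[string]bool) {
	for _, v := range desired {
		if actual[v.name] {
			continue // already mounted, nothing to do
		}
		fmt.Printf("MountVolume started for volume %q (%s)\n", v.name, v.kind)
		actual[v.name] = true
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
	}
}

func main() {
	// Volume names taken from the cluster-version-operator pod above.
	desired := []volume{
		{"serving-cert", "secret"},
		{"service-ca", "configmap"},
		{"etc-cvo-updatepayloads", "host-path"},
		{"etc-ssl-certs", "host-path"},
		{"kube-api-access", "projected"},
	}
	reconcile(desired, map[string]bool{})
}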
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 10:50:21 crc kubenswrapper[4689]: E0123 10:50:21.640304 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 10:50:21 crc kubenswrapper[4689]: I0123 10:50:21.640383 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:21 crc kubenswrapper[4689]: E0123 10:50:21.640696 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cpc6c" podUID="01ee7060-466f-4294-934f-3df3b9aa7afe" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.025513 4689 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.025729 4689 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.090661 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.091115 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.091635 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.092265 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.092971 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.093361 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.093670 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.094173 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.094438 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.094785 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwhbm"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.095067 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.105791 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.118850 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.118935 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.118956 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.119019 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.131879 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132034 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132288 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132396 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132472 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132535 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132550 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132663 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132743 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.132782 4689 
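Once a CNI config appears, the node flips to NodeReady (10:50:22.025513 above) and the kubelet fast-updates its status. A client-go sketch that reads the same Ready condition from outside the kubelet; it assumes in-cluster credentials and the node name "crc" from this log:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes this runs inside the cluster
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// The kubelet's setters.go writes this condition; here we only read it.
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			fmt.Printf("Ready=%s reason=%s message=%s\n", c.Status, c.Reason, c.Message)
		}
	}
}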
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.133069 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.133261 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.133558 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134128 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134253 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134362 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134475 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134537 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134634 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.134739 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.135065 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.135170 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.141214 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.143238 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh" event={"ID":"903aa9f8-b069-4e95-9cbd-0a4bfdb8fc6b","Type":"ContainerStarted","Data":"8773820a6003cea2d99fb0604094160831824c844a8df9c2679d9638bae92241"}
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.143609 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.144042 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.144443 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.145370 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.145508 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.146101 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.146598 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.147222 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.147859 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.170398 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f2gqj"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.171103 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.172938 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.173095 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.176327 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.176561 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.176599 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.176813 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.176932 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.177082 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.177916 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.178056 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
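Each "Caches populated" entry above marks a reflector completing its initial List+Watch for one ConfigMap or Secret that a pending pod mounts. The same mechanism is exposed through client-go's SharedInformerFactory, sketched here for a single namespace (the namespace choice and in-cluster config are assumptions):

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs, 0, informers.WithNamespace("openshift-cluster-version"))
	cm := factory.Core().V1().ConfigMaps().Informer()
	sec := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	// WaitForCacheSync returns once the initial Lists have been delivered,
	// the moment at which the kubelet would log "Caches populated".
	factory.WaitForCacheSync(stop)
	fmt.Println("caches populated:", cm.HasSynced(), sec.HasSynced())
}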
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.178214 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.179469 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.179977 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kz8dz"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.180127 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.180597 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.182330 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.182467 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zhh2t"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.182747 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.183036 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.185445 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zhh2t"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.189582 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.189801 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rkx2p"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.190502 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.191071 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.191513 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9gvwz"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.191864 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.192177 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.192249 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkpnx"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.192815 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.199486 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.208686 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.209182 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.209468 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.209580 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.210070 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-hc5js"]
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.210543 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-hc5js"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.217469 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218173 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218298 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218328 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218360 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218448 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218362 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.218904 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.219130 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220236 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220514 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9"]
4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220565 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220591 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220644 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220689 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220712 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220718 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.220597 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222137 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222345 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222680 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222760 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222849 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222949 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.222971 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.223041 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.223161 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.223271 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.223366 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.223743 4689 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.224004 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.224092 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.224129 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.225678 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.225809 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.225931 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.226041 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.226138 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.240321 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.241772 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-frtfl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.243656 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.243688 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.243857 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.245639 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.246657 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.250391 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.250459 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.264529 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.266411 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.266848 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.268484 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275190 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275243 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275269 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275289 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-dir\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275377 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275427 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275491 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275536 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2866dc5-5251-40f7-a434-73d886a12db0-machine-approver-tls\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275565 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-config\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275634 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.275653 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277526 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277630 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffzll\" (UniqueName: \"kubernetes.io/projected/7f1b033c-cebb-40ef-a05c-798f5455e05f-kube-api-access-ffzll\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277657 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4dc6a16-eedd-41d4-b059-38b10f8ce867-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277683 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gjms\" (UniqueName: \"kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277712 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-serving-cert\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277730 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7f1b033c-cebb-40ef-a05c-798f5455e05f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277747 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277775 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-policies\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277790 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277810 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7vmv\" (UniqueName: \"kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277831 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-encryption-config\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277868 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277886 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277912 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-images\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277932 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277950 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7zvm\" (UniqueName: \"kubernetes.io/projected/a4dc6a16-eedd-41d4-b059-38b10f8ce867-kube-api-access-z7zvm\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277972 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjxd4\" (UniqueName: \"kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.277990 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca\") pod 
\"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.278009 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn96j\" (UniqueName: \"kubernetes.io/projected/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-kube-api-access-bn96j\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.279112 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.279324 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.280082 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.280389 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281294 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281331 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4dc6a16-eedd-41d4-b059-38b10f8ce867-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281362 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281359 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f827151-d15b-4d39-a05c-87b0f985fcaa-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281418 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkl6b\" (UniqueName: \"kubernetes.io/projected/6f827151-d15b-4d39-a05c-87b0f985fcaa-kube-api-access-zkl6b\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281436 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-ft8k7\" (UniqueName: \"kubernetes.io/projected/d2866dc5-5251-40f7-a434-73d886a12db0-kube-api-access-ft8k7\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281459 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-auth-proxy-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281496 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281550 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f827151-d15b-4d39-a05c-87b0f985fcaa-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281567 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281589 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281617 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281646 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281662 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281678 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281693 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.281765 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-client\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.282007 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.283674 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.287017 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.289994 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.291315 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.291537 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.291921 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.293220 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.293436 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.295773 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 23 10:50:22 crc kubenswrapper[4689]: 
I0123 10:50:22.297211 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gp895"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.297581 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.297919 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.298119 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.298389 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.298438 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.298125 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.299420 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.316342 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.317611 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.318489 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.319681 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.327517 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.331214 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.331063 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.334249 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.338941 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.339407 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.339974 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.340209 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.340460 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-pmksl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.340993 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.341202 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.341548 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.342246 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.343996 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.344551 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.346237 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-49rnx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.346802 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-qvgvs"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.347034 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.347210 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.348467 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.349254 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.351466 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.352181 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.352664 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.354498 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hc5js"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.356314 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rkx2p"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.358893 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.360714 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-frtfl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.361965 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9gvwz"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.363432 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.364491 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.365058 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.366051 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-r7vmg"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.366805 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.368568 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.369415 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xhgxj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.370849 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkpnx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.370947 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.371724 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f2gqj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.372935 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.373583 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.373817 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwhbm"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.374941 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.375667 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.376873 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.377638 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.378759 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.380088 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.381199 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.382896 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383314 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7zvm\" (UniqueName: \"kubernetes.io/projected/a4dc6a16-eedd-41d4-b059-38b10f8ce867-kube-api-access-z7zvm\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: 
\"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383398 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjxd4\" (UniqueName: \"kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383428 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2942f70a-140d-43a3-98e4-4105d2a6c9d5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383445 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-serving-cert\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383468 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383486 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383506 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-config\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383523 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-config\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383542 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn96j\" (UniqueName: \"kubernetes.io/projected/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-kube-api-access-bn96j\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383550 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gp895"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383559 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383576 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4dc6a16-eedd-41d4-b059-38b10f8ce867-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383594 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-trusted-ca\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383609 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383626 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spmgx\" (UniqueName: \"kubernetes.io/projected/9d058744-ff52-4a7c-8e44-86c81270e7d1-kube-api-access-spmgx\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383641 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-encryption-config\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383658 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft8k7\" (UniqueName: \"kubernetes.io/projected/d2866dc5-5251-40f7-a434-73d886a12db0-kube-api-access-ft8k7\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383673 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/ceaf3a09-638c-4a67-ab8c-f86103f9d359-metrics-tls\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383689 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f827151-d15b-4d39-a05c-87b0f985fcaa-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383703 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkl6b\" (UniqueName: \"kubernetes.io/projected/6f827151-d15b-4d39-a05c-87b0f985fcaa-kube-api-access-zkl6b\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383721 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-auth-proxy-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.384277 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.384525 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zhh2t"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385472 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-auth-proxy-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385581 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.383737 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2942f70a-140d-43a3-98e4-4105d2a6c9d5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385933 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385957 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385973 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-service-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.385998 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9snd\" (UniqueName: \"kubernetes.io/projected/dd74410f-0023-4200-b7ca-30c04930c782-kube-api-access-g9snd\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386040 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f827151-d15b-4d39-a05c-87b0f985fcaa-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386059 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386085 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386103 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386133 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-etcd-client\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: 
I0123 10:50:22.386171 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386194 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386232 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386249 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386268 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2942f70a-140d-43a3-98e4-4105d2a6c9d5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386285 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386315 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-client\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386346 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386369 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386391 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386410 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-config\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386428 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4qkr\" (UniqueName: \"kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386446 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386462 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386480 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxmq5\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-kube-api-access-kxmq5\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386497 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386655 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.386955 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f827151-d15b-4d39-a05c-87b0f985fcaa-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387136 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387329 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-dir\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387355 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387356 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2866dc5-5251-40f7-a434-73d886a12db0-config\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387370 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387388 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjfkm\" (UniqueName: \"kubernetes.io/projected/83fb5834-aa38-45d6-bce1-65ab7968a75d-kube-api-access-zjfkm\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387406 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c546a18-71be-4888-8c63-1fe9fb06768b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387426 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn7g5\" (UniqueName: \"kubernetes.io/projected/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-kube-api-access-qn7g5\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387449 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387467 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2866dc5-5251-40f7-a434-73d886a12db0-machine-approver-tls\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387694 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c546a18-71be-4888-8c63-1fe9fb06768b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387755 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-node-pullsecrets\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387774 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-client\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387789 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387805 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 
10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387822 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d058744-ff52-4a7c-8e44-86c81270e7d1-serving-cert\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387840 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-config\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.387998 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388031 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffzll\" (UniqueName: \"kubernetes.io/projected/7f1b033c-cebb-40ef-a05c-798f5455e05f-kube-api-access-ffzll\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388080 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388097 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-config\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388124 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit-dir\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388182 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4dc6a16-eedd-41d4-b059-38b10f8ce867-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388202 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/83fb5834-aa38-45d6-bce1-65ab7968a75d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388224 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-serving-cert\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388239 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gjms\" (UniqueName: \"kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388254 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388297 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjwtw\" (UniqueName: \"kubernetes.io/projected/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-kube-api-access-bjwtw\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388620 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7f1b033c-cebb-40ef-a05c-798f5455e05f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388638 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388665 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7m6l\" (UniqueName: \"kubernetes.io/projected/fa9894a6-c179-4b45-a036-b94c23125162-kube-api-access-g7m6l\") pod \"downloads-7954f5f757-hc5js\" (UID: \"fa9894a6-c179-4b45-a036-b94c23125162\") " pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388682 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-service-ca-bundle\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388698 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388828 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-serving-cert\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388845 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-serving-cert\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388886 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388915 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7vmv\" (UniqueName: \"kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388946 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.388961 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb2th\" (UniqueName: \"kubernetes.io/projected/ceaf3a09-638c-4a67-ab8c-f86103f9d359-kube-api-access-xb2th\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389022 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-policies\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389056 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-encryption-config\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389047 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389077 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389094 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389133 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de449087-a423-43c1-9295-91572c72bedd-serving-cert\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389190 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389315 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-image-import-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389364 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-images\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389392 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389418 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p85dj\" (UniqueName: \"kubernetes.io/projected/de449087-a423-43c1-9295-91572c72bedd-kube-api-access-p85dj\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.389881 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f827151-d15b-4d39-a05c-87b0f985fcaa-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.390584 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-dir\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.391015 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.391521 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.391904 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4dc6a16-eedd-41d4-b059-38b10f8ce867-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.392671 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4dc6a16-eedd-41d4-b059-38b10f8ce867-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.392908 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.392958 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.393016 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-config\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.393176 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.393199 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.407824 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-client\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.408308 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.408776 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-serving-cert\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.410258 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.410341 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.410561 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.410708 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.411330 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.412065 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.412130 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.412712 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7f1b033c-cebb-40ef-a05c-798f5455e05f-images\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.413687 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-audit-policies\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.408322 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.414409 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca\") 
pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.414454 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.414674 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.414689 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.414723 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.415410 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.415658 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d2866dc5-5251-40f7-a434-73d886a12db0-machine-approver-tls\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.416462 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7f1b033c-cebb-40ef-a05c-798f5455e05f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.418225 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.418318 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.420075 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.421182 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-multus/multus-admission-controller-857f4d67dd-pmksl"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.422432 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-encryption-config\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.422613 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.424713 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xhgxj"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.426074 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-49rnx"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.427290 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-z8tdh"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.428105 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.429048 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.430568 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.431704 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-w9zjb"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.432243 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.433179 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.433856 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-z8tdh"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.435019 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-w9zjb"] Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.453654 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.473425 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492185 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492213 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-service-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492231 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9snd\" (UniqueName: \"kubernetes.io/projected/dd74410f-0023-4200-b7ca-30c04930c782-kube-api-access-g9snd\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492277 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492293 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-etcd-client\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492310 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492369 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnz85\" 
(UniqueName: \"kubernetes.io/projected/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-kube-api-access-mnz85\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492416 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2942f70a-140d-43a3-98e4-4105d2a6c9d5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492433 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492449 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/098f2e03-62e1-4e09-aa34-65d45a757c56-trusted-ca\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492467 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-config\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492505 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492522 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4qkr\" (UniqueName: \"kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492540 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxmq5\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-kube-api-access-kxmq5\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492573 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-bound-sa-token\") pod 
\"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492593 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492612 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjfkm\" (UniqueName: \"kubernetes.io/projected/83fb5834-aa38-45d6-bce1-65ab7968a75d-kube-api-access-zjfkm\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492628 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c546a18-71be-4888-8c63-1fe9fb06768b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492667 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn7g5\" (UniqueName: \"kubernetes.io/projected/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-kube-api-access-qn7g5\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492687 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c546a18-71be-4888-8c63-1fe9fb06768b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492704 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-node-pullsecrets\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492741 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-client\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492783 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc 
kubenswrapper[4689]: I0123 10:50:22.492838 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492855 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d058744-ff52-4a7c-8e44-86c81270e7d1-serving-cert\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492944 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-config\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492981 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/098f2e03-62e1-4e09-aa34-65d45a757c56-metrics-tls\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.492997 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit-dir\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493012 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/83fb5834-aa38-45d6-bce1-65ab7968a75d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493083 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493160 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjwtw\" (UniqueName: 
\"kubernetes.io/projected/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-kube-api-access-bjwtw\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493179 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7m6l\" (UniqueName: \"kubernetes.io/projected/fa9894a6-c179-4b45-a036-b94c23125162-kube-api-access-g7m6l\") pod \"downloads-7954f5f757-hc5js\" (UID: \"fa9894a6-c179-4b45-a036-b94c23125162\") " pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493195 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-service-ca-bundle\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493216 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-serving-cert\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493234 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493249 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-serving-cert\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493271 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb2th\" (UniqueName: \"kubernetes.io/projected/ceaf3a09-638c-4a67-ab8c-f86103f9d359-kube-api-access-xb2th\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493303 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493341 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de449087-a423-43c1-9295-91572c72bedd-serving-cert\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493357 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-image-import-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493374 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p85dj\" (UniqueName: \"kubernetes.io/projected/de449087-a423-43c1-9295-91572c72bedd-kube-api-access-p85dj\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493405 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2942f70a-140d-43a3-98e4-4105d2a6c9d5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493420 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-serving-cert\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493439 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493456 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-config\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493478 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq2z4\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-kube-api-access-fq2z4\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493495 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-config\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: 
\"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493517 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-trusted-ca\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493532 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493551 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spmgx\" (UniqueName: \"kubernetes.io/projected/9d058744-ff52-4a7c-8e44-86c81270e7d1-kube-api-access-spmgx\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493565 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-encryption-config\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493593 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ceaf3a09-638c-4a67-ab8c-f86103f9d359-metrics-tls\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.493610 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2942f70a-140d-43a3-98e4-4105d2a6c9d5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.494293 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.494498 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit-dir\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.495713 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-config\") pod \"console-operator-58897d9998-zhh2t\" (UID: 
\"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.495855 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.494671 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-node-pullsecrets\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.496391 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.496445 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.496472 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.496816 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-image-import-ca\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.496911 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.497475 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.497634 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-config\") pod 
\"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.497749 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-audit\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.498242 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.498369 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/83fb5834-aa38-45d6-bce1-65ab7968a75d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.498909 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499419 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ceaf3a09-638c-4a67-ab8c-f86103f9d359-metrics-tls\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499499 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-config\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499614 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499800 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-config\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499937 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9d058744-ff52-4a7c-8e44-86c81270e7d1-service-ca-bundle\") pod 
\"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.499968 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.500408 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-serving-cert\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.500967 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-serving-cert\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.501157 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c546a18-71be-4888-8c63-1fe9fb06768b-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.501355 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de449087-a423-43c1-9295-91572c72bedd-trusted-ca\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.501759 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.502378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-serving-cert\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.502492 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d058744-ff52-4a7c-8e44-86c81270e7d1-serving-cert\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.503108 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/6c546a18-71be-4888-8c63-1fe9fb06768b-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.503132 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-encryption-config\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.503724 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de449087-a423-43c1-9295-91572c72bedd-serving-cert\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.504262 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2942f70a-140d-43a3-98e4-4105d2a6c9d5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.504307 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd74410f-0023-4200-b7ca-30c04930c782-etcd-service-ca\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.504642 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd74410f-0023-4200-b7ca-30c04930c782-etcd-client\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.504661 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-etcd-client\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.513873 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.533373 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.542078 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2942f70a-140d-43a3-98e4-4105d2a6c9d5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 
10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.553679 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.573404 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.592980 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.594768 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/098f2e03-62e1-4e09-aa34-65d45a757c56-trusted-ca\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.594812 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.594858 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.594927 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/098f2e03-62e1-4e09-aa34-65d45a757c56-metrics-tls\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.594996 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.595068 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq2z4\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-kube-api-access-fq2z4\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.595132 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnz85\" (UniqueName: \"kubernetes.io/projected/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-kube-api-access-mnz85\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.613302 4689 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.634603 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.638993 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.654777 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.675327 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.694182 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.714008 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.734265 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.753202 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.774020 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.794196 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.813119 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.835600 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.855261 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.874739 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.893557 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.915332 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.933295 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.969301 4689 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.973030 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 23 10:50:22 crc kubenswrapper[4689]: I0123 10:50:22.994573 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.014467 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.034179 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.053926 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.073948 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.107325 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-snbsh" podStartSLOduration=69.107307913 podStartE2EDuration="1m9.107307913s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:23.10720344 +0000 UTC m=+87.731883309" watchObservedRunningTime="2026-01-23 10:50:23.107307913 +0000 UTC m=+87.731987792" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.148623 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.159272 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.159465 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.173962 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.193741 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.214007 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.234253 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.252878 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 
10:50:23.273081 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.293499 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.312490 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.333658 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.351436 4689 request.go:700] Waited for 1.009990223s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/secrets?fieldSelector=metadata.name%3Dingress-operator-dockercfg-7lnqk&limit=500&resourceVersion=0 Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.353400 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.373105 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.378977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/098f2e03-62e1-4e09-aa34-65d45a757c56-metrics-tls\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.403813 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.406327 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/098f2e03-62e1-4e09-aa34-65d45a757c56-trusted-ca\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.414011 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.433397 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.453513 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.473439 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.493823 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.513930 4689 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.533719 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.553389 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.573233 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.593358 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: E0123 10:50:23.596252 4689 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Jan 23 10:50:23 crc kubenswrapper[4689]: E0123 10:50:23.596383 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token podName:5d87ebd8-b6d3-46d5-80e9-712e565fc21e nodeName:}" failed. No retries permitted until 2026-01-23 10:50:24.096346455 +0000 UTC m=+88.721026354 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token") pod "machine-config-server-r7vmg" (UID: "5d87ebd8-b6d3-46d5-80e9-712e565fc21e") : failed to sync secret cache: timed out waiting for the condition Jan 23 10:50:23 crc kubenswrapper[4689]: E0123 10:50:23.596794 4689 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Jan 23 10:50:23 crc kubenswrapper[4689]: E0123 10:50:23.596888 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs podName:5d87ebd8-b6d3-46d5-80e9-712e565fc21e nodeName:}" failed. No retries permitted until 2026-01-23 10:50:24.096861788 +0000 UTC m=+88.721541687 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs") pod "machine-config-server-r7vmg" (UID: "5d87ebd8-b6d3-46d5-80e9-712e565fc21e") : failed to sync secret cache: timed out waiting for the condition Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.613369 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.634290 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.640327 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.640379 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.640454 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.652986 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.673338 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.693874 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.714654 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.734103 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.754088 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.775827 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.793490 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.814099 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.832914 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.854865 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.874298 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.893644 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.913610 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.934009 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.952442 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.972974 4689 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 23 10:50:23 crc kubenswrapper[4689]: I0123 10:50:23.994621 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.043343 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z7zvm\" (UniqueName: \"kubernetes.io/projected/a4dc6a16-eedd-41d4-b059-38b10f8ce867-kube-api-access-z7zvm\") pod \"openshift-controller-manager-operator-756b6f6bc6-2wrl9\" (UID: \"a4dc6a16-eedd-41d4-b059-38b10f8ce867\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.064221 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn96j\" (UniqueName: \"kubernetes.io/projected/9e8ec1f3-5f7f-4150-82e2-34b2f4910385-kube-api-access-bn96j\") pod \"apiserver-7bbb656c7d-w4md5\" (UID: \"9e8ec1f3-5f7f-4150-82e2-34b2f4910385\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.078516 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft8k7\" (UniqueName: \"kubernetes.io/projected/d2866dc5-5251-40f7-a434-73d886a12db0-kube-api-access-ft8k7\") pod \"machine-approver-56656f9798-cgckv\" (UID: \"d2866dc5-5251-40f7-a434-73d886a12db0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.098844 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjxd4\" (UniqueName: \"kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4\") pod \"route-controller-manager-6576b87f9c-94vk4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.106378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkl6b\" (UniqueName: \"kubernetes.io/projected/6f827151-d15b-4d39-a05c-87b0f985fcaa-kube-api-access-zkl6b\") pod \"openshift-apiserver-operator-796bbdcf4f-7hf2k\" (UID: \"6f827151-d15b-4d39-a05c-87b0f985fcaa\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.113720 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.113804 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.117761 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-node-bootstrap-token\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.117914 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-certs\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.142141 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffzll\" (UniqueName: \"kubernetes.io/projected/7f1b033c-cebb-40ef-a05c-798f5455e05f-kube-api-access-ffzll\") pod \"machine-api-operator-5694c8668f-f2gqj\" (UID: \"7f1b033c-cebb-40ef-a05c-798f5455e05f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.159322 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7vmv\" (UniqueName: \"kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv\") pod \"controller-manager-879f6c89f-cnwdd\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.194465 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.205936 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gjms\" (UniqueName: \"kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms\") pod \"oauth-openshift-558db77b4-cwhbm\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.208795 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.213794 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.223095 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.233516 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.240484 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" Jan 23 10:50:24 crc kubenswrapper[4689]: W0123 10:50:24.240683 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2866dc5_5251_40f7_a434_73d886a12db0.slice/crio-de7f8a376853ac8ab6986625ea5c3a37f2d86269efbecac28279c5c7c431b330 WatchSource:0}: Error finding container de7f8a376853ac8ab6986625ea5c3a37f2d86269efbecac28279c5c7c431b330: Status 404 returned error can't find the container with id de7f8a376853ac8ab6986625ea5c3a37f2d86269efbecac28279c5c7c431b330 Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.254049 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.272895 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.274301 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.285258 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.294394 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.300424 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.315597 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.317236 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.349062 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.352477 4689 request.go:700] Waited for 1.857712103s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/serviceaccounts/openshift-config-operator/token Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.358107 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2942f70a-140d-43a3-98e4-4105d2a6c9d5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w4kks\" (UID: \"2942f70a-140d-43a3-98e4-4105d2a6c9d5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.382717 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn7g5\" (UniqueName: \"kubernetes.io/projected/5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3-kube-api-access-qn7g5\") pod \"openshift-config-operator-7777fb866f-pr8zt\" (UID: \"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.391849 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p85dj\" (UniqueName: \"kubernetes.io/projected/de449087-a423-43c1-9295-91572c72bedd-kube-api-access-p85dj\") pod \"console-operator-58897d9998-zhh2t\" (UID: \"de449087-a423-43c1-9295-91572c72bedd\") " pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.415295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4qkr\" (UniqueName: \"kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr\") pod \"console-f9d7485db-kz8dz\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.416790 4689 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.431737 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7m6l\" (UniqueName: \"kubernetes.io/projected/fa9894a6-c179-4b45-a036-b94c23125162-kube-api-access-g7m6l\") pod \"downloads-7954f5f757-hc5js\" (UID: \"fa9894a6-c179-4b45-a036-b94c23125162\") " pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.455300 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjfkm\" (UniqueName: \"kubernetes.io/projected/83fb5834-aa38-45d6-bce1-65ab7968a75d-kube-api-access-zjfkm\") pod \"cluster-samples-operator-665b6dd947-tmsmx\" (UID: \"83fb5834-aa38-45d6-bce1-65ab7968a75d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.457662 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.470811 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.478762 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.493713 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9snd\" (UniqueName: \"kubernetes.io/projected/dd74410f-0023-4200-b7ca-30c04930c782-kube-api-access-g9snd\") pod \"etcd-operator-b45778765-frtfl\" (UID: \"dd74410f-0023-4200-b7ca-30c04930c782\") " pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.506420 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spmgx\" (UniqueName: \"kubernetes.io/projected/9d058744-ff52-4a7c-8e44-86c81270e7d1-kube-api-access-spmgx\") pod \"authentication-operator-69f744f599-9gvwz\" (UID: \"9d058744-ff52-4a7c-8e44-86c81270e7d1\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.507010 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.515495 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.548937 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjwtw\" (UniqueName: \"kubernetes.io/projected/05112afa-e07a-4342-9d37-2a6b9b7ac9a2-kube-api-access-bjwtw\") pod \"apiserver-76f77b778f-gkpnx\" (UID: \"05112afa-e07a-4342-9d37-2a6b9b7ac9a2\") " pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 
10:50:24.548986 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb2th\" (UniqueName: \"kubernetes.io/projected/ceaf3a09-638c-4a67-ab8c-f86103f9d359-kube-api-access-xb2th\") pod \"dns-operator-744455d44c-rkx2p\" (UID: \"ceaf3a09-638c-4a67-ab8c-f86103f9d359\") " pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.570143 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxmq5\" (UniqueName: \"kubernetes.io/projected/6c546a18-71be-4888-8c63-1fe9fb06768b-kube-api-access-kxmq5\") pod \"cluster-image-registry-operator-dc59b4c8b-kcczs\" (UID: \"6c546a18-71be-4888-8c63-1fe9fb06768b\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.593292 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnz85\" (UniqueName: \"kubernetes.io/projected/5d87ebd8-b6d3-46d5-80e9-712e565fc21e-kube-api-access-mnz85\") pod \"machine-config-server-r7vmg\" (UID: \"5d87ebd8-b6d3-46d5-80e9-712e565fc21e\") " pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.611595 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.635233 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq2z4\" (UniqueName: \"kubernetes.io/projected/098f2e03-62e1-4e09-aa34-65d45a757c56-kube-api-access-fq2z4\") pod \"ingress-operator-5b745b69d9-n65mf\" (UID: \"098f2e03-62e1-4e09-aa34-65d45a757c56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.635647 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.653780 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.670989 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.671053 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-r7vmg" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.678569 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.701422 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.706781 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.715631 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.724772 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.733405 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 23 10:50:24 crc kubenswrapper[4689]: W0123 10:50:24.735507 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d87ebd8_b6d3_46d5_80e9_712e565fc21e.slice/crio-606dc32f736776d3e1afc724eeb35a9465f35ba1c3b031705b89185e97fa0a19 WatchSource:0}: Error finding container 606dc32f736776d3e1afc724eeb35a9465f35ba1c3b031705b89185e97fa0a19: Status 404 returned error can't find the container with id 606dc32f736776d3e1afc724eeb35a9465f35ba1c3b031705b89185e97fa0a19 Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.742546 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.753207 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.755231 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.764390 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.764789 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f2gqj"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.765244 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.792659 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.795127 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.819714 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zhh2t"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824513 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824542 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824563 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-images\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824580 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-apiservice-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824599 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj4p4\" (UniqueName: \"kubernetes.io/projected/c7b42454-f36b-4ab0-86d0-a2decba67e28-kube-api-access-bj4p4\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824613 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rmrh\" (UniqueName: \"kubernetes.io/projected/61756198-4db5-4ee2-b629-c92d64b9bf73-kube-api-access-5rmrh\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824646 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mknbl\" (UniqueName: \"kubernetes.io/projected/a8f4f0c7-61db-4423-8f3a-229e4ac94951-kube-api-access-mknbl\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824662 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8j8h\" (UniqueName: \"kubernetes.io/projected/3377dc50-b5b0-40d0-9b16-295713320fcd-kube-api-access-j8j8h\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824690 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-srv-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824715 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824730 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-registration-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824745 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824759 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9kxc\" (UniqueName: \"kubernetes.io/projected/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-kube-api-access-d9kxc\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824784 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q7pm\" (UniqueName: \"kubernetes.io/projected/da2befb5-074b-4665-b2ed-23d3f1b1df31-kube-api-access-6q7pm\") 
pod \"migrator-59844c95c7-945fj\" (UID: \"da2befb5-074b-4665-b2ed-23d3f1b1df31\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824814 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824832 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/61756198-4db5-4ee2-b629-c92d64b9bf73-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824848 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30e5a25-1fad-49ec-9fe8-263ab088fc64-serving-cert\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824865 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-proxy-tls\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824879 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-metrics-certs\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824904 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/08709336-c061-456a-ab17-4530f25582f3-signing-cabundle\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824921 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zwqk\" (UniqueName: \"kubernetes.io/projected/6105379b-1fb8-4384-b6d5-67b4db5498e5-kube-api-access-9zwqk\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824935 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30e5a25-1fad-49ec-9fe8-263ab088fc64-config\") pod 
\"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824961 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq8cp\" (UniqueName: \"kubernetes.io/projected/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-kube-api-access-kq8cp\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.824983 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/6105379b-1fb8-4384-b6d5-67b4db5498e5-tmpfs\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825005 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825018 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-srv-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825033 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-socket-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825047 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klkjc\" (UniqueName: \"kubernetes.io/projected/08709336-c061-456a-ab17-4530f25582f3-kube-api-access-klkjc\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825081 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825108 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825127 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825165 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-webhook-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825179 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/08709336-c061-456a-ab17-4530f25582f3-signing-key\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825194 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b68417f8-3be8-499d-941c-f2b885b3a467-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825471 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c8jk\" (UniqueName: \"kubernetes.io/projected/7e63a4b4-9425-4409-9b0b-291e771cb38a-kube-api-access-5c8jk\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825501 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-mountpoint-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825537 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825581 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wchq\" (UniqueName: 
\"kubernetes.io/projected/babc1ea3-3d75-429d-b5a1-c966c652d827-kube-api-access-6wchq\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825598 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-stats-auth\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825624 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825657 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825673 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-plugins-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825687 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5cf083-96b6-4cc8-818a-ca50c4689165-config\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825702 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-default-certificate\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825726 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-csi-data-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825740 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5cf083-96b6-4cc8-818a-ca50c4689165-serving-cert\") pod 
\"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825808 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e63a4b4-9425-4409-9b0b-291e771cb38a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825825 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/babc1ea3-3d75-429d-b5a1-c966c652d827-proxy-tls\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825840 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqbxk\" (UniqueName: \"kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825866 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b68417f8-3be8-499d-941c-f2b885b3a467-config\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825912 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9mmq\" (UniqueName: \"kubernetes.io/projected/f9dd4a75-d3a3-42d6-a305-43e95e450611-kube-api-access-w9mmq\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825927 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd5cf083-96b6-4cc8-818a-ca50c4689165-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825954 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b68417f8-3be8-499d-941c-f2b885b3a467-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825970 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-profile-collector-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.825985 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8f4f0c7-61db-4423-8f3a-229e4ac94951-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826010 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx2qd\" (UniqueName: \"kubernetes.io/projected/e30e5a25-1fad-49ec-9fe8-263ab088fc64-kube-api-access-rx2qd\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826025 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5v8k\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826051 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826066 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c7b42454-f36b-4ab0-86d0-a2decba67e28-service-ca-bundle\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826082 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dd2t\" (UniqueName: \"kubernetes.io/projected/60f6136e-990e-4ca3-88d3-ff00f4db14e7-kube-api-access-5dd2t\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826097 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: 
I0123 10:50:24.826120 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826135 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e63a4b4-9425-4409-9b0b-291e771cb38a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.826164 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77vxh\" (UniqueName: \"kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: E0123 10:50:24.827619 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.327602732 +0000 UTC m=+89.952282591 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:24 crc kubenswrapper[4689]: W0123 10:50:24.850528 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4dc6a16_eedd_41d4_b059_38b10f8ce867.slice/crio-593985ef2d97012e3187e30045b0050042f17595dbb2b0bcf4ef737ce1633cc4 WatchSource:0}: Error finding container 593985ef2d97012e3187e30045b0050042f17595dbb2b0bcf4ef737ce1633cc4: Status 404 returned error can't find the container with id 593985ef2d97012e3187e30045b0050042f17595dbb2b0bcf4ef737ce1633cc4 Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.912079 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.916223 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.920853 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwhbm"] Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944070 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944256 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-csi-data-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944296 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5cf083-96b6-4cc8-818a-ca50c4689165-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944333 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e63a4b4-9425-4409-9b0b-291e771cb38a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944360 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/babc1ea3-3d75-429d-b5a1-c966c652d827-proxy-tls\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944382 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqbxk\" (UniqueName: \"kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944408 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b68417f8-3be8-499d-941c-f2b885b3a467-config\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944438 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-w9mmq\" (UniqueName: \"kubernetes.io/projected/f9dd4a75-d3a3-42d6-a305-43e95e450611-kube-api-access-w9mmq\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944453 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd5cf083-96b6-4cc8-818a-ca50c4689165-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944470 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b68417f8-3be8-499d-941c-f2b885b3a467-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944485 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-profile-collector-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944502 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8f4f0c7-61db-4423-8f3a-229e4ac94951-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944523 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx2qd\" (UniqueName: \"kubernetes.io/projected/e30e5a25-1fad-49ec-9fe8-263ab088fc64-kube-api-access-rx2qd\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944542 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/08fe053a-867d-498a-a459-96916291511e-metrics-tls\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944572 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5v8k\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944590 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-m6rqt\" (UniqueName: \"kubernetes.io/projected/08fe053a-867d-498a-a459-96916291511e-kube-api-access-m6rqt\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944607 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944629 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c7b42454-f36b-4ab0-86d0-a2decba67e28-service-ca-bundle\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944648 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dd2t\" (UniqueName: \"kubernetes.io/projected/60f6136e-990e-4ca3-88d3-ff00f4db14e7-kube-api-access-5dd2t\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944668 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944692 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08fe053a-867d-498a-a459-96916291511e-config-volume\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944716 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944732 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e63a4b4-9425-4409-9b0b-291e771cb38a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944750 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77vxh\" (UniqueName: \"kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh\") pod \"marketplace-operator-79b997595-97xbl\" (UID: 
\"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944797 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944821 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-images\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944847 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-apiservice-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944871 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj4p4\" (UniqueName: \"kubernetes.io/projected/c7b42454-f36b-4ab0-86d0-a2decba67e28-kube-api-access-bj4p4\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944892 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rmrh\" (UniqueName: \"kubernetes.io/projected/61756198-4db5-4ee2-b629-c92d64b9bf73-kube-api-access-5rmrh\") pod \"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944914 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mknbl\" (UniqueName: \"kubernetes.io/projected/a8f4f0c7-61db-4423-8f3a-229e4ac94951-kube-api-access-mknbl\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944936 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8j8h\" (UniqueName: \"kubernetes.io/projected/3377dc50-b5b0-40d0-9b16-295713320fcd-kube-api-access-j8j8h\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944957 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-srv-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.944978 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945000 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-registration-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945019 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945034 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9kxc\" (UniqueName: \"kubernetes.io/projected/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-kube-api-access-d9kxc\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945053 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q7pm\" (UniqueName: \"kubernetes.io/projected/da2befb5-074b-4665-b2ed-23d3f1b1df31-kube-api-access-6q7pm\") pod \"migrator-59844c95c7-945fj\" (UID: \"da2befb5-074b-4665-b2ed-23d3f1b1df31\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945078 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/61756198-4db5-4ee2-b629-c92d64b9bf73-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945093 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30e5a25-1fad-49ec-9fe8-263ab088fc64-serving-cert\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945120 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-proxy-tls\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945136 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-metrics-certs\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945180 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp9z2\" (UniqueName: \"kubernetes.io/projected/9de9798d-d640-4f37-aee7-9db1fa7c4289-kube-api-access-dp9z2\") pod \"ingress-canary-w9zjb\" (UID: \"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945210 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/08709336-c061-456a-ab17-4530f25582f3-signing-cabundle\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945228 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zwqk\" (UniqueName: \"kubernetes.io/projected/6105379b-1fb8-4384-b6d5-67b4db5498e5-kube-api-access-9zwqk\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945244 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30e5a25-1fad-49ec-9fe8-263ab088fc64-config\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945260 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq8cp\" (UniqueName: \"kubernetes.io/projected/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-kube-api-access-kq8cp\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945286 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/6105379b-1fb8-4384-b6d5-67b4db5498e5-tmpfs\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945313 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume\") pod 
\"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945334 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-srv-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945356 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-socket-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945377 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klkjc\" (UniqueName: \"kubernetes.io/projected/08709336-c061-456a-ab17-4530f25582f3-kube-api-access-klkjc\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945395 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945414 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945429 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945444 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-webhook-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945459 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/08709336-c061-456a-ab17-4530f25582f3-signing-key\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945474 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b68417f8-3be8-499d-941c-f2b885b3a467-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945491 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c8jk\" (UniqueName: \"kubernetes.io/projected/7e63a4b4-9425-4409-9b0b-291e771cb38a-kube-api-access-5c8jk\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945518 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-mountpoint-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945540 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945572 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wchq\" (UniqueName: \"kubernetes.io/projected/babc1ea3-3d75-429d-b5a1-c966c652d827-kube-api-access-6wchq\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945595 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-stats-auth\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945620 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945638 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945655 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" 
(UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-plugins-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945677 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9de9798d-d640-4f37-aee7-9db1fa7c4289-cert\") pod \"ingress-canary-w9zjb\" (UID: \"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945701 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5cf083-96b6-4cc8-818a-ca50c4689165-config\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.945718 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-default-certificate\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.947261 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c7b42454-f36b-4ab0-86d0-a2decba67e28-service-ca-bundle\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: E0123 10:50:24.947340 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.447325703 +0000 UTC m=+90.072005562 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.947472 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-csi-data-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.947614 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-images\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.949461 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.954872 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-apiservice-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.955518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/08709336-c061-456a-ab17-4530f25582f3-signing-cabundle\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.956232 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.956541 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-registration-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.957840 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/babc1ea3-3d75-429d-b5a1-c966c652d827-auth-proxy-config\") pod 
\"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.959000 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e30e5a25-1fad-49ec-9fe8-263ab088fc64-config\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.960194 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8f4f0c7-61db-4423-8f3a-229e4ac94951-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.961112 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/6105379b-1fb8-4384-b6d5-67b4db5498e5-tmpfs\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.961875 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.961923 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e63a4b4-9425-4409-9b0b-291e771cb38a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.962044 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-socket-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.965843 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.966083 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc 
kubenswrapper[4689]: I0123 10:50:24.967069 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.967846 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5cf083-96b6-4cc8-818a-ca50c4689165-config\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.967896 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-plugins-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.968286 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/61756198-4db5-4ee2-b629-c92d64b9bf73-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.969412 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/60f6136e-990e-4ca3-88d3-ff00f4db14e7-mountpoint-dir\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.969585 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b68417f8-3be8-499d-941c-f2b885b3a467-config\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.970316 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.970687 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e63a4b4-9425-4409-9b0b-291e771cb38a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.971763 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-metrics-certs\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.972117 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-proxy-tls\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.974842 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.978511 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-srv-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.979309 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-stats-auth\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.979715 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-srv-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.982815 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b68417f8-3be8-499d-941c-f2b885b3a467-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.984755 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6105379b-1fb8-4384-b6d5-67b4db5498e5-webhook-cert\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.985082 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.988430 
4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c7b42454-f36b-4ab0-86d0-a2decba67e28-default-certificate\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.990005 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/babc1ea3-3d75-429d-b5a1-c966c652d827-proxy-tls\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.990659 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/08709336-c061-456a-ab17-4530f25582f3-signing-key\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.992057 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.992181 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f9dd4a75-d3a3-42d6-a305-43e95e450611-profile-collector-cert\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.993109 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3377dc50-b5b0-40d0-9b16-295713320fcd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:24 crc kubenswrapper[4689]: I0123 10:50:24.994539 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.000385 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5cf083-96b6-4cc8-818a-ca50c4689165-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.004850 4689 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e8ec1f3_5f7f_4150_82e2_34b2f4910385.slice/crio-e05a7cbc535c459a4b183f35f15b1c53989cef59a04947d185d3f18468c42193 WatchSource:0}: Error finding container e05a7cbc535c459a4b183f35f15b1c53989cef59a04947d185d3f18468c42193: Status 404 returned error can't find the container with id e05a7cbc535c459a4b183f35f15b1c53989cef59a04947d185d3f18468c42193 Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.005124 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mknbl\" (UniqueName: \"kubernetes.io/projected/a8f4f0c7-61db-4423-8f3a-229e4ac94951-kube-api-access-mknbl\") pod \"package-server-manager-789f6589d5-wx978\" (UID: \"a8f4f0c7-61db-4423-8f3a-229e4ac94951\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.005488 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e30e5a25-1fad-49ec-9fe8-263ab088fc64-serving-cert\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.032576 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.047921 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/08fe053a-867d-498a-a459-96916291511e-metrics-tls\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.047984 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6rqt\" (UniqueName: \"kubernetes.io/projected/08fe053a-867d-498a-a459-96916291511e-kube-api-access-m6rqt\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.048018 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08fe053a-867d-498a-a459-96916291511e-config-volume\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.048098 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.048123 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp9z2\" (UniqueName: \"kubernetes.io/projected/9de9798d-d640-4f37-aee7-9db1fa7c4289-kube-api-access-dp9z2\") pod \"ingress-canary-w9zjb\" (UID: 
\"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.048237 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9de9798d-d640-4f37-aee7-9db1fa7c4289-cert\") pod \"ingress-canary-w9zjb\" (UID: \"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.048303 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dd2t\" (UniqueName: \"kubernetes.io/projected/60f6136e-990e-4ca3-88d3-ff00f4db14e7-kube-api-access-5dd2t\") pod \"csi-hostpathplugin-xhgxj\" (UID: \"60f6136e-990e-4ca3-88d3-ff00f4db14e7\") " pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.048607 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.548588149 +0000 UTC m=+90.173268008 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.049213 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08fe053a-867d-498a-a459-96916291511e-config-volume\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.059034 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj4p4\" (UniqueName: \"kubernetes.io/projected/c7b42454-f36b-4ab0-86d0-a2decba67e28-kube-api-access-bj4p4\") pod \"router-default-5444994796-qvgvs\" (UID: \"c7b42454-f36b-4ab0-86d0-a2decba67e28\") " pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.078459 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/08fe053a-867d-498a-a459-96916291511e-metrics-tls\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.079250 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9de9798d-d640-4f37-aee7-9db1fa7c4289-cert\") pod \"ingress-canary-w9zjb\" (UID: \"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.092543 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rmrh\" (UniqueName: \"kubernetes.io/projected/61756198-4db5-4ee2-b629-c92d64b9bf73-kube-api-access-5rmrh\") pod \"control-plane-machine-set-operator-78cbb6b69f-5zz8l\" (UID: \"61756198-4db5-4ee2-b629-c92d64b9bf73\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.113062 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77vxh\" (UniqueName: \"kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh\") pod \"marketplace-operator-79b997595-97xbl\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.115447 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.115489 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.119741 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8j8h\" (UniqueName: \"kubernetes.io/projected/3377dc50-b5b0-40d0-9b16-295713320fcd-kube-api-access-j8j8h\") pod \"olm-operator-6b444d44fb-99t57\" (UID: \"3377dc50-b5b0-40d0-9b16-295713320fcd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.126206 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-frtfl"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.126256 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.142040 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klkjc\" (UniqueName: \"kubernetes.io/projected/08709336-c061-456a-ab17-4530f25582f3-kube-api-access-klkjc\") pod \"service-ca-9c57cc56f-gp895\" (UID: \"08709336-c061-456a-ab17-4530f25582f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.143787 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-gp895" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.150238 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.150396 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.150688 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.650671307 +0000 UTC m=+90.275351166 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.161051 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-hc5js"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.166496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9kxc\" (UniqueName: \"kubernetes.io/projected/dc66c81d-1ea4-494e-99d4-1fbdb64a4a47-kube-api-access-d9kxc\") pod \"multus-admission-controller-857f4d67dd-pmksl\" (UID: \"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.166611 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.173902 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q7pm\" (UniqueName: \"kubernetes.io/projected/da2befb5-074b-4665-b2ed-23d3f1b1df31-kube-api-access-6q7pm\") pod \"migrator-59844c95c7-945fj\" (UID: \"da2befb5-074b-4665-b2ed-23d3f1b1df31\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.177221 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2942f70a_140d_43a3_98e4_4105d2a6c9d5.slice/crio-f9c5c463a855dcd2bd7eced5ab238d4231bb190b34d8cb777e6572bafb6eda67 WatchSource:0}: Error finding container f9c5c463a855dcd2bd7eced5ab238d4231bb190b34d8cb777e6572bafb6eda67: Status 404 returned error can't find the container with id f9c5c463a855dcd2bd7eced5ab238d4231bb190b34d8cb777e6572bafb6eda67 Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.191962 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5v8k\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.192383 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" event={"ID":"7f1b033c-cebb-40ef-a05c-798f5455e05f","Type":"ContainerStarted","Data":"fef6ca61009e03a2f72029028cd50707d36c539c15c382d5611c18145034c729"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.192442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" event={"ID":"7f1b033c-cebb-40ef-a05c-798f5455e05f","Type":"ContainerStarted","Data":"bdbe243edda45c9702a72d28d6785bbff88ac82b5d2dbbf2b2d4bb4bd3c98db4"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.201530 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.206267 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd74410f_0023_4200_b7ca_30c04930c782.slice/crio-218543cc723f0bf4b122b9efc1fd566c2d4125de886232e8b8cc2769fba78336 WatchSource:0}: Error finding container 218543cc723f0bf4b122b9efc1fd566c2d4125de886232e8b8cc2769fba78336: Status 404 returned error can't find the container with id 218543cc723f0bf4b122b9efc1fd566c2d4125de886232e8b8cc2769fba78336 Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.206822 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" event={"ID":"6f827151-d15b-4d39-a05c-87b0f985fcaa","Type":"ContainerStarted","Data":"fcd3477bf76538c1fefd38d32de3491153ab37f71163fc0aec56d2aba2c27666"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.211399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" event={"ID":"83e87693-d35f-4125-a703-f9c5e9a5652c","Type":"ContainerStarted","Data":"4186dfb4538871753d7ad6e1b7cdf1ba91f00ef304c64e773ae8af4272e67ed5"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.211445 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zwqk\" (UniqueName: \"kubernetes.io/projected/6105379b-1fb8-4384-b6d5-67b4db5498e5-kube-api-access-9zwqk\") pod \"packageserver-d55dfcdfc-zg96c\" (UID: \"6105379b-1fb8-4384-b6d5-67b4db5498e5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.222997 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" event={"ID":"9e8ec1f3-5f7f-4150-82e2-34b2f4910385","Type":"ContainerStarted","Data":"e05a7cbc535c459a4b183f35f15b1c53989cef59a04947d185d3f18468c42193"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.227403 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" event={"ID":"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4","Type":"ContainerStarted","Data":"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.227923 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.227999 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" event={"ID":"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4","Type":"ContainerStarted","Data":"b0776652588bbe0d46f0143f5ccb42280388e657e73d8dae8cf9487895e40e6d"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.228173 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.228787 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq8cp\" (UniqueName: \"kubernetes.io/projected/cab4ae0f-8813-48bc-b9a7-0bfb40776d0b-kube-api-access-kq8cp\") pod \"machine-config-controller-84d6567774-dtkdv\" (UID: \"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.244615 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.245755 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.246069 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.251637 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.252340 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.254848 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.752980341 +0000 UTC m=+90.377660200 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.256484 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" event={"ID":"d2866dc5-5251-40f7-a434-73d886a12db0","Type":"ContainerStarted","Data":"6e430ff5ce53e0618f1eb318d3f86187c68093aa4d836d5084ae26b004134c14"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.256542 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" event={"ID":"d2866dc5-5251-40f7-a434-73d886a12db0","Type":"ContainerStarted","Data":"de7f8a376853ac8ab6986625ea5c3a37f2d86269efbecac28279c5c7c431b330"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.261559 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx2qd\" (UniqueName: \"kubernetes.io/projected/e30e5a25-1fad-49ec-9fe8-263ab088fc64-kube-api-access-rx2qd\") pod \"service-ca-operator-777779d784-49rnx\" (UID: \"e30e5a25-1fad-49ec-9fe8-263ab088fc64\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.263394 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-r7vmg" event={"ID":"5d87ebd8-b6d3-46d5-80e9-712e565fc21e","Type":"ContainerStarted","Data":"7477af5009a1d041a99facc9c9723da6a7241188ea3f96465b090d1b2983e339"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.263458 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-r7vmg" event={"ID":"5d87ebd8-b6d3-46d5-80e9-712e565fc21e","Type":"ContainerStarted","Data":"606dc32f736776d3e1afc724eeb35a9465f35ba1c3b031705b89185e97fa0a19"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.278903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b68417f8-3be8-499d-941c-f2b885b3a467-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gwfkj\" (UID: \"b68417f8-3be8-499d-941c-f2b885b3a467\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.305236 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.308035 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqbxk\" (UniqueName: \"kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk\") pod \"collect-profiles-29486085-kzpnx\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.308715 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" event={"ID":"de449087-a423-43c1-9295-91572c72bedd","Type":"ContainerStarted","Data":"3aac11f83dbd726f26316e50e83e15df37364620d26b36a0a5590be9d5ba391a"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.309219 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.313388 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" event={"ID":"a4dc6a16-eedd-41d4-b059-38b10f8ce867","Type":"ContainerStarted","Data":"593985ef2d97012e3187e30045b0050042f17595dbb2b0bcf4ef737ce1633cc4"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.319346 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.319416 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.323166 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" event={"ID":"275e6b8d-6343-4146-8f0d-f9b6125e272a","Type":"ContainerStarted","Data":"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.323236 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" event={"ID":"275e6b8d-6343-4146-8f0d-f9b6125e272a","Type":"ContainerStarted","Data":"f46bc0cd972cd3192f88295c01ccf6a856783119221d8170e33e7973f1f31898"} Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.323854 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.332041 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd5cf083-96b6-4cc8-818a-ca50c4689165-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6lzwm\" (UID: \"bd5cf083-96b6-4cc8-818a-ca50c4689165\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.336297 4689 
patch_prober.go:28] interesting pod/controller-manager-879f6c89f-cnwdd container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.336359 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.352423 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.353279 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.355847 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c8jk\" (UniqueName: \"kubernetes.io/projected/7e63a4b4-9425-4409-9b0b-291e771cb38a-kube-api-access-5c8jk\") pod \"kube-storage-version-migrator-operator-b67b599dd-bcppn\" (UID: \"7e63a4b4-9425-4409-9b0b-291e771cb38a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.358358 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.858326283 +0000 UTC m=+90.483006142 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.377211 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wchq\" (UniqueName: \"kubernetes.io/projected/babc1ea3-3d75-429d-b5a1-c966c652d827-kube-api-access-6wchq\") pod \"machine-config-operator-74547568cd-jt8ww\" (UID: \"babc1ea3-3d75-429d-b5a1-c966c652d827\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.392363 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.393636 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.399255 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9gvwz"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.410501 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkpnx"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.412809 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9mmq\" (UniqueName: \"kubernetes.io/projected/f9dd4a75-d3a3-42d6-a305-43e95e450611-kube-api-access-w9mmq\") pod \"catalog-operator-68c6474976-xvxpp\" (UID: \"f9dd4a75-d3a3-42d6-a305-43e95e450611\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.415775 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6rqt\" (UniqueName: \"kubernetes.io/projected/08fe053a-867d-498a-a459-96916291511e-kube-api-access-m6rqt\") pod \"dns-default-z8tdh\" (UID: \"08fe053a-867d-498a-a459-96916291511e\") " pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.430987 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.445262 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp9z2\" (UniqueName: \"kubernetes.io/projected/9de9798d-d640-4f37-aee7-9db1fa7c4289-kube-api-access-dp9z2\") pod \"ingress-canary-w9zjb\" (UID: \"9de9798d-d640-4f37-aee7-9db1fa7c4289\") " pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.456636 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.457636 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.458977 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:25.958955634 +0000 UTC m=+90.583635683 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.475492 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.476077 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.489003 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.496631 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.520672 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.535908 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.557666 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.557930 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.057884421 +0000 UTC m=+90.682564280 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.557982 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.558338 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.058320522 +0000 UTC m=+90.683000371 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.593546 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.595805 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rkx2p"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.618733 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.629727 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-w9zjb" Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.651276 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d058744_ff52_4a7c_8e44_86c81270e7d1.slice/crio-b9e7780236939f637511eee98cc4373cdb4aacce849a93bf9e302eb13d77e558 WatchSource:0}: Error finding container b9e7780236939f637511eee98cc4373cdb4aacce849a93bf9e302eb13d77e558: Status 404 returned error can't find the container with id b9e7780236939f637511eee98cc4373cdb4aacce849a93bf9e302eb13d77e558 Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.660062 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.660498 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.160475952 +0000 UTC m=+90.785155811 (durationBeforeRetry 500ms). 
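
Each failed volume operation above gets parked by nestedpendingoperations.go behind a backoff window: "No retries permitted until <t>" is simply lastErrorTime plus durationBeforeRetry, and the reconciler's roughly 100ms loop keeps asking to start the operation again, producing one refusal line per pass. A sketch of that gate; the 500ms initial window matches the log, while the doubling and the cap are assumptions of this sketch (here the window stays at 500ms throughout):

    package main

    import (
        "fmt"
        "time"
    )

    const (
        initialBackoff = 500 * time.Millisecond        // matches durationBeforeRetry above
        maxBackoff     = 2*time.Minute + 2*time.Second // assumed cap
    )

    // pendingOp mimics the per-{volume,pod} backoff state behind the repeated
    // "No retries permitted until ..." errors.
    type pendingOp struct {
        lastErrorTime time.Time
        backoff       time.Duration
    }

    func (op *pendingOp) markFailed(now time.Time) {
        op.lastErrorTime = now
        if op.backoff == 0 {
            op.backoff = initialBackoff
            return
        }
        op.backoff *= 2 // illustrative growth; the log above shows a constant 500ms
        if op.backoff > maxBackoff {
            op.backoff = maxBackoff
        }
    }

    // mayRetry is the gate: a new attempt on the same operation key is refused
    // until the retry window opens.
    func (op *pendingOp) mayRetry(now time.Time) error {
        retryAt := op.lastErrorTime.Add(op.backoff)
        if now.Before(retryAt) {
            return fmt.Errorf("no retries permitted until %v (durationBeforeRetry %v)", retryAt, op.backoff)
        }
        return nil
    }

    func main() {
        var op pendingOp
        op.markFailed(time.Now())
        fmt.Println(op.mayRetry(time.Now())) // refused for the next 500ms
    }
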
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.661325 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05112afa_e07a_4342_9d37_2a6b9b7ac9a2.slice/crio-b1fd5738ca452d29982188bee382ece6cf6397a91b07716fcc34df6ecb68ae43 WatchSource:0}: Error finding container b1fd5738ca452d29982188bee382ece6cf6397a91b07716fcc34df6ecb68ae43: Status 404 returned error can't find the container with id b1fd5738ca452d29982188bee382ece6cf6397a91b07716fcc34df6ecb68ae43 Jan 23 10:50:25 crc kubenswrapper[4689]: W0123 10:50:25.700006 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod098f2e03_62e1_4e09_aa34_65d45a757c56.slice/crio-0fa765c3660541be2fd4ef0b23570716f0136ba9853e60320b9380df5cd10497 WatchSource:0}: Error finding container 0fa765c3660541be2fd4ef0b23570716f0136ba9853e60320b9380df5cd10497: Status 404 returned error can't find the container with id 0fa765c3660541be2fd4ef0b23570716f0136ba9853e60320b9380df5cd10497 Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.764113 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.764576 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.264557271 +0000 UTC m=+90.889237190 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.865300 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.865505 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-23 10:50:26.365476679 +0000 UTC m=+90.990156538 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.865884 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.866370 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.366350562 +0000 UTC m=+90.991030421 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.894438 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.942382 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57"] Jan 23 10:50:25 crc kubenswrapper[4689]: I0123 10:50:25.971825 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:25 crc kubenswrapper[4689]: E0123 10:50:25.972247 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.472232007 +0000 UTC m=+91.096911866 (durationBeforeRetry 500ms). 
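
Every one of these mount and unmount errors ends the same way: "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers". The kubelet resolves a volume's driver name against a table that is only populated when the CSI plugin registers itself over the kubelet's plugin socket, so until the hostpath plugin pod starts (its "SyncLoop UPDATE" for hostpath-provisioner/csi-hostpathplugin-xhgxj appears further down), every attempt fails fast at the lookup. A sketch of that lookup, assuming a simple name-to-endpoint map rather than the kubelet's real registration plumbing:

    package main

    import (
        "fmt"
        "sync"
    )

    // csiDriverRegistry stands in for the kubelet's table of registered CSI
    // drivers; mount/unmount paths resolve the driver name here first.
    type csiDriverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]string // driver name -> plugin socket endpoint
    }

    func (r *csiDriverRegistry) newDriverClient(name string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        endpoint, ok := r.drivers[name]
        if !ok {
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
        }
        return endpoint, nil
    }

    func (r *csiDriverRegistry) register(name, endpoint string) {
        r.mu.Lock()
        defer r.mu.Unlock()
        if r.drivers == nil {
            r.drivers = map[string]string{}
        }
        r.drivers[name] = endpoint
    }

    func main() {
        reg := &csiDriverRegistry{}
        _, err := reg.newDriverClient("kubevirt.io.hostpath-provisioner")
        fmt.Println(err) // fails exactly as in the log, until the plugin registers
        // hypothetical socket path for illustration:
        reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock")
        _, err = reg.newDriverClient("kubevirt.io.hostpath-provisioner")
        fmt.Println(err) // <nil>
    }
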
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.001823 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-gp895"] Jan 23 10:50:26 crc kubenswrapper[4689]: W0123 10:50:26.038982 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8f4f0c7_61db_4423_8f3a_229e4ac94951.slice/crio-daabd6a58c15b077ee6b83737590852c22493c0b85d5134337a596f39c53aac0 WatchSource:0}: Error finding container daabd6a58c15b077ee6b83737590852c22493c0b85d5134337a596f39c53aac0: Status 404 returned error can't find the container with id daabd6a58c15b077ee6b83737590852c22493c0b85d5134337a596f39c53aac0 Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.063218 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.081821 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.082224 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.582208877 +0000 UTC m=+91.206888736 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.161587 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-r7vmg" podStartSLOduration=4.161562043 podStartE2EDuration="4.161562043s" podCreationTimestamp="2026-01-23 10:50:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:26.126126743 +0000 UTC m=+90.750806612" watchObservedRunningTime="2026-01-23 10:50:26.161562043 +0000 UTC m=+90.786241902" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.189294 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.193491 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.693443441 +0000 UTC m=+91.318123300 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.290951 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.291573 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.791551047 +0000 UTC m=+91.416230986 (durationBeforeRetry 500ms). 
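
The pod_startup_latency_tracker durations are straight wall-clock subtractions: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and since both pull timestamps are the zero time ("0001-01-01 ...", no image pull happened), the SLO duration collapses to the same value. A short check that reproduces the machine-config-server-r7vmg numbers from the record above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Values copied from the machine-config-server-r7vmg record above.
        created, _ := time.Parse(time.RFC3339, "2026-01-23T10:50:22Z")
        observed, _ := time.Parse(time.RFC3339Nano, "2026-01-23T10:50:26.161562043Z")

        // E2E startup = first observed running - pod creation.
        fmt.Println(observed.Sub(created)) // 4.161562043s, matching podStartE2EDuration
    }
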
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.298073 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" podStartSLOduration=71.298040983 podStartE2EDuration="1m11.298040983s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:26.264072412 +0000 UTC m=+90.888752271" watchObservedRunningTime="2026-01-23 10:50:26.298040983 +0000 UTC m=+90.922720842" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.342551 4689 generic.go:334] "Generic (PLEG): container finished" podID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerID="053e80ebbd40e3573bd4f7ed946bb6f66cca9956b19cca9bf246785959a68041" exitCode=0 Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.342617 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" event={"ID":"9e8ec1f3-5f7f-4150-82e2-34b2f4910385","Type":"ContainerDied","Data":"053e80ebbd40e3573bd4f7ed946bb6f66cca9956b19cca9bf246785959a68041"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.344932 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" event={"ID":"098f2e03-62e1-4e09-aa34-65d45a757c56","Type":"ContainerStarted","Data":"0fa765c3660541be2fd4ef0b23570716f0136ba9853e60320b9380df5cd10497"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.348731 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" event={"ID":"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3","Type":"ContainerStarted","Data":"91e9a5f5c8b394fd7c8a4c796e0f400bb62517ffca9fe95f07070fae702282b0"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.350345 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" event={"ID":"83fb5834-aa38-45d6-bce1-65ab7968a75d","Type":"ContainerStarted","Data":"cee829f07a7be9a003c8d931405e06db073f10ce168f82c17f512fda54e96a4e"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.393691 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.394004 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:26.893985774 +0000 UTC m=+91.518665633 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.397197 4689 csr.go:261] certificate signing request csr-b7wc6 is approved, waiting to be issued Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.411180 4689 csr.go:257] certificate signing request csr-b7wc6 is issued Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.413535 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" event={"ID":"de449087-a423-43c1-9295-91572c72bedd","Type":"ContainerStarted","Data":"e3da4255118290d8cb3a7be69771468a1e1d0e106c873e2be77ab1cd10e1d1b9"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.430524 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" event={"ID":"83e87693-d35f-4125-a703-f9c5e9a5652c","Type":"ContainerStarted","Data":"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.430956 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.444073 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" event={"ID":"2942f70a-140d-43a3-98e4-4105d2a6c9d5","Type":"ContainerStarted","Data":"47a5532b2eec2f676ed719d98e9a2c56ab82f5fb4835694db627196b8c18a3bd"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.444110 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" event={"ID":"2942f70a-140d-43a3-98e4-4105d2a6c9d5","Type":"ContainerStarted","Data":"f9c5c463a855dcd2bd7eced5ab238d4231bb190b34d8cb777e6572bafb6eda67"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.447428 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-gp895" event={"ID":"08709336-c061-456a-ab17-4530f25582f3","Type":"ContainerStarted","Data":"2aebe2cca229accf9012c8dac287e108399746adddc641dbaef4c3bdb50393c5"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.451819 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" event={"ID":"6c546a18-71be-4888-8c63-1fe9fb06768b","Type":"ContainerStarted","Data":"d412481945519c774ad425a7b2439e43cdc80cfbef09168888f32be3148f23f5"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.453890 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerStarted","Data":"1fe2c5f37042c73cfa268c18b8a6018265744b24f785ceb4d868e7104cabc397"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.458829 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" 
event={"ID":"a8f4f0c7-61db-4423-8f3a-229e4ac94951","Type":"ContainerStarted","Data":"daabd6a58c15b077ee6b83737590852c22493c0b85d5134337a596f39c53aac0"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.460872 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kz8dz" event={"ID":"484d0401-5634-42e8-b09e-8c7eb65aa84c","Type":"ContainerStarted","Data":"e43e5995d23af20e7a17ad62ad0cceda853081effa76c143ba60abd9706ee1b9"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.462578 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qvgvs" event={"ID":"c7b42454-f36b-4ab0-86d0-a2decba67e28","Type":"ContainerStarted","Data":"73ae010f11cbaaa4d362f70e7ff3f7a521ae2b47fae4ebdbe2da79cbf7cda416"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.464799 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" event={"ID":"05112afa-e07a-4342-9d37-2a6b9b7ac9a2","Type":"ContainerStarted","Data":"b1fd5738ca452d29982188bee382ece6cf6397a91b07716fcc34df6ecb68ae43"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.466638 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" event={"ID":"6f827151-d15b-4d39-a05c-87b0f985fcaa","Type":"ContainerStarted","Data":"cd3d1a5362f7c511b7c706d8d5ca7e7e2affa4f7b20ad8ab424e953e3392df09"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.471395 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" event={"ID":"7f1b033c-cebb-40ef-a05c-798f5455e05f","Type":"ContainerStarted","Data":"bc48f76afd777dd4acf73d9c4051c6903ddfc92081f7bc9f0d1d909371080217"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.473769 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" event={"ID":"ceaf3a09-638c-4a67-ab8c-f86103f9d359","Type":"ContainerStarted","Data":"ff9e39243587a2c892b8f76f7d8315595b86226bfa49fd0f57009d4f2ab347b0"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.479061 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" event={"ID":"3377dc50-b5b0-40d0-9b16-295713320fcd","Type":"ContainerStarted","Data":"0e160d7db216a2b3d567a79fed5ffa8ee4a8bd309560304469bbca97cb02224a"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.491166 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" event={"ID":"9d058744-ff52-4a7c-8e44-86c81270e7d1","Type":"ContainerStarted","Data":"b9e7780236939f637511eee98cc4373cdb4aacce849a93bf9e302eb13d77e558"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.492811 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" event={"ID":"a4dc6a16-eedd-41d4-b059-38b10f8ce867","Type":"ContainerStarted","Data":"1a6672e98c014b05e14f0cb7eedf9e923f524d9bea0f96804642cf558dd78a31"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.499548 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: 
\"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.500884 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.000869944 +0000 UTC m=+91.625549803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.562595 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" event={"ID":"d2866dc5-5251-40f7-a434-73d886a12db0","Type":"ContainerStarted","Data":"f4771e4b1ad82a0ce12c1ef1e28fc40f240160393a6c76790deecdf203a3e0c2"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.583538 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" event={"ID":"dd74410f-0023-4200-b7ca-30c04930c782","Type":"ContainerStarted","Data":"218543cc723f0bf4b122b9efc1fd566c2d4125de886232e8b8cc2769fba78336"} Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.598944 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.600352 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.602604 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.102583523 +0000 UTC m=+91.727263372 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.694520 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podStartSLOduration=72.69450466 podStartE2EDuration="1m12.69450466s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:26.694382747 +0000 UTC m=+91.319062606" watchObservedRunningTime="2026-01-23 10:50:26.69450466 +0000 UTC m=+91.319184509" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.704455 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.708306 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.208292854 +0000 UTC m=+91.832972703 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.742564 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.831785 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.832453 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.332412537 +0000 UTC m=+91.957092396 (durationBeforeRetry 500ms). 
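
The csr-b7wc6 lines a little above trace the two phases of kubelet certificate issuance: the CertificateSigningRequest object first gains an Approved condition ("approved, waiting to be issued"), and only afterwards does the signer populate status.certificate ("is issued"). A toy model of the state the kubelet is polling for; the real wait goes through client-go's certificate machinery, and the struct here is a deliberate simplification:

    package main

    import "fmt"

    // csrStatus reduces a CertificateSigningRequest to the two fields the
    // kubelet's wait loop cares about.
    type csrStatus struct {
        approved    bool   // an Approved condition is present
        certificate []byte // status.certificate, filled in by the signer
    }

    func describe(s csrStatus) string {
        switch {
        case !s.approved:
            return "pending approval"
        case s.certificate == nil:
            return "approved, waiting to be issued"
        default:
            return "issued"
        }
    }

    func main() {
        s := csrStatus{approved: true}
        fmt.Println(describe(s)) // approved, waiting to be issued
        s.certificate = []byte("-----BEGIN CERTIFICATE-----")
        fmt.Println(describe(s)) // issued
    }
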
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.851288 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" podStartSLOduration=71.851268291 podStartE2EDuration="1m11.851268291s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:26.845479572 +0000 UTC m=+91.470159431" watchObservedRunningTime="2026-01-23 10:50:26.851268291 +0000 UTC m=+91.475948150" Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.880898 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"] Jan 23 10:50:26 crc kubenswrapper[4689]: I0123 10:50:26.933386 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:26 crc kubenswrapper[4689]: E0123 10:50:26.934135 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.434124276 +0000 UTC m=+92.058804125 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.041753 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7hf2k" podStartSLOduration=73.041737205 podStartE2EDuration="1m13.041737205s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.041204062 +0000 UTC m=+91.665883921" watchObservedRunningTime="2026-01-23 10:50:27.041737205 +0000 UTC m=+91.666417064" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.043698 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.044075 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.544058805 +0000 UTC m=+92.168738664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.145119 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.145433 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.645422594 +0000 UTC m=+92.270102453 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.145947 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-f2gqj" podStartSLOduration=72.145922868 podStartE2EDuration="1m12.145922868s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.137794079 +0000 UTC m=+91.762473928" watchObservedRunningTime="2026-01-23 10:50:27.145922868 +0000 UTC m=+91.770602737" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.194359 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.246925 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.247447 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.74742201 +0000 UTC m=+92.372101869 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.262912 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w4kks" podStartSLOduration=72.262890757 podStartE2EDuration="1m12.262890757s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.260069245 +0000 UTC m=+91.884749124" watchObservedRunningTime="2026-01-23 10:50:27.262890757 +0000 UTC m=+91.887570616" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.264308 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2wrl9" podStartSLOduration=72.264301663 podStartE2EDuration="1m12.264301663s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.229430349 +0000 UTC m=+91.854110208" watchObservedRunningTime="2026-01-23 10:50:27.264301663 +0000 UTC m=+91.888981532" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.348115 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.348474 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.848462941 +0000 UTC m=+92.473142800 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.413915 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-23 10:45:26 +0000 UTC, rotation deadline is 2026-11-12 02:41:28.129184593 +0000 UTC Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.413964 4689 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7023h51m0.715223052s for next certificate rotation Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.459190 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.459518 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:27.95950371 +0000 UTC m=+92.584183569 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.560473 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.560748 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.060735626 +0000 UTC m=+92.685415475 (durationBeforeRetry 500ms). 
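
The certificate_manager.go lines above pick a rotation deadline months before expiry: client-go jitters the deadline uniformly into the 70-90% span of the certificate's validity, and the logged 2026-11-12 deadline lands at roughly 80% of a one-year certificate. A sketch of that computation; the one-year notBefore is an assumption, since only the expiration is logged:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // nextRotationDeadline picks a uniformly random point in the 70-90% span
    // of the certificate's validity window.
    func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := float64(notAfter.Sub(notBefore))
        jittered := time.Duration((0.7 + 0.2*rand.Float64()) * total)
        return notBefore.Add(jittered)
    }

    func main() {
        notAfter, _ := time.Parse(time.RFC3339, "2027-01-23T10:45:26Z")
        notBefore := notAfter.AddDate(-1, 0, 0) // assumed one-year validity
        deadline := nextRotationDeadline(notBefore, notAfter)
        // The kubelet drew 2026-11-12 02:41:28 and then logged the remaining
        // wait ("Waiting 7023h51m..."); this sketch draws its own point.
        fmt.Println(deadline, time.Until(deadline))
    }
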
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.636717 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgckv" podStartSLOduration=73.636698044 podStartE2EDuration="1m13.636698044s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.606528271 +0000 UTC m=+92.231208130" watchObservedRunningTime="2026-01-23 10:50:27.636698044 +0000 UTC m=+92.261377903" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.637335 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" event={"ID":"6c546a18-71be-4888-8c63-1fe9fb06768b","Type":"ContainerStarted","Data":"17533ef15e24316475f0f4bf50033f86ee8faccacc3eaa7fa741bbdff8179f50"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.663901 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.664939 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.164904607 +0000 UTC m=+92.789584466 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.690119 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" podStartSLOduration=73.690094893 podStartE2EDuration="1m13.690094893s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.655380963 +0000 UTC m=+92.280060822" watchObservedRunningTime="2026-01-23 10:50:27.690094893 +0000 UTC m=+92.314774752" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.720348 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.720388 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.720401 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" event={"ID":"a8f4f0c7-61db-4423-8f3a-229e4ac94951","Type":"ContainerStarted","Data":"0dfd51238ced2f022083ef050a4014a45c0db53c123ecc77d442e04c076bdb17"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.730235 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" event={"ID":"dd74410f-0023-4200-b7ca-30c04930c782","Type":"ContainerStarted","Data":"543a328d9c788ee2138923e1161b37599d9cf8abda1e9f1354f8e38a7d27a958"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.757321 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-xhgxj"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.769864 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.770532 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.270512216 +0000 UTC m=+92.895192075 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.781354 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" event={"ID":"585f99c4-5f5e-4caf-9b9f-31a7f666bea8","Type":"ContainerStarted","Data":"3f7139052bf5ffff520c62185a9480cd5e73c5a8c9e36464a70fe198d19b72ac"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.784762 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-gp895" event={"ID":"08709336-c061-456a-ab17-4530f25582f3","Type":"ContainerStarted","Data":"a016e08d88400eda176e53d81ce903bf42a686f0156cda653955cd5236567df2"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.803820 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" event={"ID":"c83ad550-7576-440f-bac5-8308b6c801b0","Type":"ContainerStarted","Data":"1b86e57f0a95ec6a16c8c17bdc30462239bd9c532fd62aca399fbdd43ac37525"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.828157 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.855742 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kcczs" podStartSLOduration=72.855720821 podStartE2EDuration="1m12.855720821s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.853216437 +0000 UTC m=+92.477896296" watchObservedRunningTime="2026-01-23 10:50:27.855720821 +0000 UTC m=+92.480400680" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.874378 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" event={"ID":"3377dc50-b5b0-40d0-9b16-295713320fcd","Type":"ContainerStarted","Data":"111c929bd1b9db70f524544791cc6fbb0fb391a0342bc4ef07d2d96d46ba3dab"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.875372 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.875757 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.876750 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
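
The "SyncLoop (PLEG): event for pod" records are the pod lifecycle event generator relaying container-runtime state changes into the kubelet's sync loop: each event pairs a pod UID with an event type and a container or sandbox ID, and each one triggers a re-sync of that pod. A ContainerDied with exitCode=0, like the oauth-apiserver one earlier, is a normal completion rather than an error. A sketch of the event shape as it prints in these logs (field names mirror the logged JSON; the handler is illustrative only):

    package main

    import "fmt"

    // PodLifecycleEvent mirrors the event={"ID":...,"Type":...,"Data":...}
    // payloads in the log.
    type PodLifecycleEvent struct {
        ID   string // pod UID
        Type string // "ContainerStarted", "ContainerDied", ...
        Data string // container or sandbox ID
    }

    func handle(e PodLifecycleEvent) {
        fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.ID, e.Type, e.Data)
    }

    func main() {
        handle(PodLifecycleEvent{
            ID:   "585f99c4-5f5e-4caf-9b9f-31a7f666bea8",
            Type: "ContainerStarted",
            Data: "3f7139052bf5ffff520c62185a9480cd5e73c5a8c9e36464a70fe198d19b72ac",
        })
    }
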
No retries permitted until 2026-01-23 10:50:28.3767324 +0000 UTC m=+93.001412259 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.891328 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-z8tdh"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.893471 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.893547 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.897546 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" event={"ID":"9d058744-ff52-4a7c-8e44-86c81270e7d1","Type":"ContainerStarted","Data":"3228ffcb48d7c3d7668df8877738301ff810d2f4229fe537aba32e0352d31988"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.906698 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.922607 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-frtfl" podStartSLOduration=72.922588876 podStartE2EDuration="1m12.922588876s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.920092372 +0000 UTC m=+92.544772241" watchObservedRunningTime="2026-01-23 10:50:27.922588876 +0000 UTC m=+92.547268735" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.925784 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-pmksl"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.951956 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" event={"ID":"83fb5834-aa38-45d6-bce1-65ab7968a75d","Type":"ContainerStarted","Data":"aaef9cd880145345e0c6b1945332b2475b03a1653db975bc69e490e8dfb31806"} Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.969108 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-49rnx"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.980088 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:27 crc kubenswrapper[4689]: E0123 10:50:27.981722 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.481705982 +0000 UTC m=+93.106385841 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.981962 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-w9zjb"] Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.993585 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-gp895" podStartSLOduration=72.993556176 podStartE2EDuration="1m12.993556176s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:27.981424355 +0000 UTC m=+92.606104214" watchObservedRunningTime="2026-01-23 10:50:27.993556176 +0000 UTC m=+92.618236035" Jan 23 10:50:27 crc kubenswrapper[4689]: I0123 10:50:27.994117 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn"] Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.008496 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" podStartSLOduration=74.008470499 podStartE2EDuration="1m14.008470499s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:28.006919179 +0000 UTC m=+92.631599028" watchObservedRunningTime="2026-01-23 10:50:28.008470499 +0000 UTC m=+92.633150348" Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.051638 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podStartSLOduration=73.051619035 podStartE2EDuration="1m13.051619035s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:28.051366309 +0000 UTC m=+92.676046168" watchObservedRunningTime="2026-01-23 10:50:28.051619035 +0000 UTC m=+92.676298894" Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.073919 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj"] Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.082570 4689 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.084039 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.584022186 +0000 UTC m=+93.208702045 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.149686 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv"] Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.155672 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm"] Jan 23 10:50:28 crc kubenswrapper[4689]: W0123 10:50:28.158982 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb68417f8_3be8_499d_941c_f2b885b3a467.slice/crio-9a832b5010d70cb0c82097e9aaa3ea44c7d642cb9323b75bea674e18febbf11f WatchSource:0}: Error finding container 9a832b5010d70cb0c82097e9aaa3ea44c7d642cb9323b75bea674e18febbf11f: Status 404 returned error can't find the container with id 9a832b5010d70cb0c82097e9aaa3ea44c7d642cb9323b75bea674e18febbf11f Jan 23 10:50:28 crc kubenswrapper[4689]: W0123 10:50:28.159713 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcab4ae0f_8813_48bc_b9a7_0bfb40776d0b.slice/crio-b6e18d055c09e2cc031f958d233d08684b7435fb4470cf6b0724d1ceab91d070 WatchSource:0}: Error finding container b6e18d055c09e2cc031f958d233d08684b7435fb4470cf6b0724d1ceab91d070: Status 404 returned error can't find the container with id b6e18d055c09e2cc031f958d233d08684b7435fb4470cf6b0724d1ceab91d070 Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.169203 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp"] Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.185697 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.186336 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-23 10:50:28.68631374 +0000 UTC m=+93.310993779 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.286840 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.287292 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.787274769 +0000 UTC m=+93.411954628 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.388802 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.389136 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.889124501 +0000 UTC m=+93.513804360 (durationBeforeRetry 500ms). 
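Every one of these failures bottoms out in the same lookup: attacher.MountDevice and Unmounter.TearDownAt both need a CSI client for kubevirt.io.hostpath-provisioner, and that name is not yet in the kubelet's registered-driver list. A rough sketch of that lookup-and-fail shape, with illustrative names and an assumed socket path rather than kubelet's actual types:

package main

import "fmt"

// csiDriverRegistry stands in for the kubelet's registered-driver list;
// the map and method names are illustrative, not the real implementation.
type csiDriverRegistry map[string]string // driver name -> plugin socket path

func (r csiDriverRegistry) newCsiDriverClient(driver string) (string, error) {
	sock, ok := r[driver]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
	}
	return sock, nil
}

func main() {
	registry := csiDriverRegistry{} // empty until the plugin pod registers itself
	if _, err := registry.newCsiDriverClient("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("Error:", err) // the error repeated throughout this log
	}
	// After registration the same lookup succeeds (hypothetical socket path):
	registry["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
	sock, _ := registry.newCsiDriverClient("kubevirt.io.hostpath-provisioner")
	fmt.Println("driver registered at", sock)
}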
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.491125 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.491510 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.991486606 +0000 UTC m=+93.616166465 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.491642 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.492003 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:28.991995939 +0000 UTC m=+93.616675798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.592463 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.592766 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.092751333 +0000 UTC m=+93.717431192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.694077 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.699411 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.198821923 +0000 UTC m=+93.823501782 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.795846 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.796491 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.296474658 +0000 UTC m=+93.921154517 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.897916 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:28 crc kubenswrapper[4689]: E0123 10:50:28.898284 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.398272458 +0000 UTC m=+94.022952317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.967362 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerStarted","Data":"d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555"}
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.967957 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hc5js"
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.979638 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.979688 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 10:50:28 crc kubenswrapper[4689]: I0123 10:50:28.988403 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-hc5js" podStartSLOduration=73.98838518 podStartE2EDuration="1m13.98838518s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:28.987802674 +0000 UTC m=+93.612482533" watchObservedRunningTime="2026-01-23 10:50:28.98838518 +0000 UTC m=+93.613065039"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:28.999836 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.000206 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.500191882 +0000 UTC m=+94.124871751 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.004413 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" event={"ID":"61756198-4db5-4ee2-b629-c92d64b9bf73","Type":"ContainerStarted","Data":"c2db4510c1c85819e6462d34f26a9dce2bb521dc04b1b1d70041a4a08079990c"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.004528 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" event={"ID":"61756198-4db5-4ee2-b629-c92d64b9bf73","Type":"ContainerStarted","Data":"9088f640d8d1ec2d986822adf8de53fd15fe97425d244d8184bd3235800ec98b"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.037113 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5zz8l" podStartSLOduration=74.037094539 podStartE2EDuration="1m14.037094539s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.035778985 +0000 UTC m=+93.660458854" watchObservedRunningTime="2026-01-23 10:50:29.037094539 +0000 UTC m=+93.661774398"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.049854 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" event={"ID":"a8f4f0c7-61db-4423-8f3a-229e4ac94951","Type":"ContainerStarted","Data":"99e38766149d092ea1445a9f449e7740604ee04a96eac6479ea7d55dedd7919b"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.049958 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.057788 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" event={"ID":"f9dd4a75-d3a3-42d6-a305-43e95e450611","Type":"ContainerStarted","Data":"ac3bb8059a50b264b7309d9fe1d140234950dff682b07da9f2b1766e4a5e4eaa"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.063453 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" event={"ID":"6105379b-1fb8-4384-b6d5-67b4db5498e5","Type":"ContainerStarted","Data":"85d0d6abce5c7c60763b91d3ede66b06784e5c65356656df7bc6be0d6f9db9a7"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.063496 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" event={"ID":"6105379b-1fb8-4384-b6d5-67b4db5498e5","Type":"ContainerStarted","Data":"c670d8e34c5a7ba8a361c2df7d285722f7596d183d3439a80936dff4cd2531db"}
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.063509 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c"
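The patch_prober/prober pairs in this stretch record HTTP readiness probes failing with "connection refused" because the freshly started containers have not yet opened their listeners; the probes flip to success once each server binds its port. A hedged sketch of such a check, assuming a plain GET against the pod IP (kubelet's real prober also handles HTTPS, headers, and TLS skipping, omitted here):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP readiness check; a refused connection yields the
// same "failure" verdict the prober.go:107 lines report.
func probe(url string) string {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Sprintf("failure: %v", err) // e.g. "connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success"
	}
	return fmt.Sprintf("failure: status %d", resp.StatusCode)
}

func main() {
	// Pod IP and port taken from the downloads-7954f5f757-hc5js probe line above.
	fmt.Println(probe("http://10.217.0.15:8080/"))
}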
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.074033 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podStartSLOduration=74.074019626 podStartE2EDuration="1m14.074019626s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.070767283 +0000 UTC m=+93.695447142" watchObservedRunningTime="2026-01-23 10:50:29.074019626 +0000 UTC m=+93.698699485" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.076525 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.084223 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.091044 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kz8dz" event={"ID":"484d0401-5634-42e8-b09e-8c7eb65aa84c","Type":"ContainerStarted","Data":"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.094454 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" event={"ID":"babc1ea3-3d75-429d-b5a1-c966c652d827","Type":"ContainerStarted","Data":"b8b281a6a0e148ddab44c7195ce4d7447152ba9b40c571af4d7ee33532b88c94"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.094491 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" event={"ID":"babc1ea3-3d75-429d-b5a1-c966c652d827","Type":"ContainerStarted","Data":"4eefc32c4dad8b43d7f33caf55f7bacd9b9a817b7f39c4784f1a3540ff96d4f7"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.099422 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podStartSLOduration=74.099409147 podStartE2EDuration="1m14.099409147s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.099048828 +0000 UTC m=+93.723728687" watchObservedRunningTime="2026-01-23 10:50:29.099409147 +0000 UTC m=+93.724089006" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.100946 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 
10:50:29.102532 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.602520226 +0000 UTC m=+94.227200085 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.123742 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" podStartSLOduration=74.123727 podStartE2EDuration="1m14.123727s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.123555286 +0000 UTC m=+93.748235145" watchObservedRunningTime="2026-01-23 10:50:29.123727 +0000 UTC m=+93.748406859" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.138550 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" event={"ID":"9e8ec1f3-5f7f-4150-82e2-34b2f4910385","Type":"ContainerStarted","Data":"ae53b5bf555f0f34d69a4028ac38c5e675df395b21c5bd362fa18c339f207ede"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.158571 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-kz8dz" podStartSLOduration=74.158558113 podStartE2EDuration="1m14.158558113s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.158488062 +0000 UTC m=+93.783167921" watchObservedRunningTime="2026-01-23 10:50:29.158558113 +0000 UTC m=+93.783237972" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.178091 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" event={"ID":"c83ad550-7576-440f-bac5-8308b6c801b0","Type":"ContainerStarted","Data":"9150f9d267f2d3d18e29021e4b3549dfc8d5cdc74e9f7f595819631794282cf0"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.205410 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.205661 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.705643521 +0000 UTC m=+94.330323380 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.205699 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.207092 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.707079998 +0000 UTC m=+94.331759857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.213452 4689 generic.go:334] "Generic (PLEG): container finished" podID="05112afa-e07a-4342-9d37-2a6b9b7ac9a2" containerID="5597b978a9a865862985879fb574ff1de65296f6eba044cb738ea0719869944d" exitCode=0 Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.213543 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" event={"ID":"05112afa-e07a-4342-9d37-2a6b9b7ac9a2","Type":"ContainerDied","Data":"5597b978a9a865862985879fb574ff1de65296f6eba044cb738ea0719869944d"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.229591 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" podStartSLOduration=75.229577935 podStartE2EDuration="1m15.229577935s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.228509527 +0000 UTC m=+93.853189386" watchObservedRunningTime="2026-01-23 10:50:29.229577935 +0000 UTC m=+93.854257784" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.230504 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podStartSLOduration=74.230499909 podStartE2EDuration="1m14.230499909s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.189777755 +0000 UTC m=+93.814457614" watchObservedRunningTime="2026-01-23 10:50:29.230499909 +0000 UTC m=+93.855179768" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.249913 4689 
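In the pod_startup_latency_tracker lines, both pulling timestamps are the zero time (no image pull was observed), and podStartSLOduration appears to equal watchObservedRunningTime minus podCreationTimestamp. Recomputing the collect-profiles-29486085-kzpnx entry above as a sanity check, a small self-contained sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the collect-profiles startup-latency line above.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, err := time.Parse(layout, "2026-01-23 10:49:14 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2026-01-23 10:50:29.229577935 +0000 UTC")
	if err != nil {
		panic(err)
	}
	// Prints 1m15.229577935s and 75.229577935, matching the logged
	// podStartE2EDuration and podStartSLOduration values.
	d := running.Sub(created)
	fmt.Println(d, d.Seconds())
}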
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" event={"ID":"7e63a4b4-9425-4409-9b0b-291e771cb38a","Type":"ContainerStarted","Data":"447044d9aa3e29f540b430244b8f8b7509ebc6be13ea49c785902a229f707ace"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.295523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z8tdh" event={"ID":"08fe053a-867d-498a-a459-96916291511e","Type":"ContainerStarted","Data":"abfc0b8e433c68956991d36299daeff75ca1a3a86ffd71f47014544b563e5d26"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.306944 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.310030 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.810004417 +0000 UTC m=+94.434684286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.311013 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" podStartSLOduration=74.310997513 podStartE2EDuration="1m14.310997513s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.31048439 +0000 UTC m=+93.935164249" watchObservedRunningTime="2026-01-23 10:50:29.310997513 +0000 UTC m=+93.935677372" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.317629 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.317678 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.348467 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.348873 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" event={"ID":"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47","Type":"ContainerStarted","Data":"a834ee61db545188c1392c06902ba826dbb926218615b6ff16fdd83c30bc23ed"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.368705 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-canary/ingress-canary-w9zjb" event={"ID":"9de9798d-d640-4f37-aee7-9db1fa7c4289","Type":"ContainerStarted","Data":"c0271cb9ccfcf278b7637ad4d6847e863e8a62656239274304b2d933fd974f75"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.368747 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-w9zjb" event={"ID":"9de9798d-d640-4f37-aee7-9db1fa7c4289","Type":"ContainerStarted","Data":"fd08e408fbf48e3132bd38608feb53866ab88534839e4c419dd52b0fc1249199"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.376655 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" event={"ID":"83fb5834-aa38-45d6-bce1-65ab7968a75d","Type":"ContainerStarted","Data":"d3094f7a762c4effad398e9e163e269aeaa590cbd158aca81343a10c611fe878"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.399885 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" event={"ID":"585f99c4-5f5e-4caf-9b9f-31a7f666bea8","Type":"ContainerStarted","Data":"3802b9f725815de07e34f94030bad4c4680e3406a31e2434d8d604d5ab9e6d08"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.400397 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.401783 4689 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-97xbl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.401834 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.409041 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.409443 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:29.909428277 +0000 UTC m=+94.534108186 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.417523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" event={"ID":"098f2e03-62e1-4e09-aa34-65d45a757c56","Type":"ContainerStarted","Data":"f4990a34ea3c6757aaebe471132ddd5976e53a7d695efdf4b7b6a5743220f4b2"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.417569 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" event={"ID":"098f2e03-62e1-4e09-aa34-65d45a757c56","Type":"ContainerStarted","Data":"c7bed4f3cec05f4fcf33ee1fe5d8323bda03e42af17e9483f6d3d7161896edc5"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.437520 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" event={"ID":"e30e5a25-1fad-49ec-9fe8-263ab088fc64","Type":"ContainerStarted","Data":"a49e50e8360f22cfb5b3a14508538f0ffbfa8628a9c0b8ac23b314e83af12716"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.437769 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" event={"ID":"e30e5a25-1fad-49ec-9fe8-263ab088fc64","Type":"ContainerStarted","Data":"4976fd872c99f8cff4a280af711289fc6d771ca7cb4ae3b91007c360bc4c7913"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.445251 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-w9zjb" podStartSLOduration=7.445233776 podStartE2EDuration="7.445233776s" podCreationTimestamp="2026-01-23 10:50:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.401222178 +0000 UTC m=+94.025902037" watchObservedRunningTime="2026-01-23 10:50:29.445233776 +0000 UTC m=+94.069913635" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.445654 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-tmsmx" podStartSLOduration=75.445648816 podStartE2EDuration="1m15.445648816s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.444629781 +0000 UTC m=+94.069309640" watchObservedRunningTime="2026-01-23 10:50:29.445648816 +0000 UTC m=+94.070328675" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.491966 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" podStartSLOduration=74.491953314 podStartE2EDuration="1m14.491953314s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.490200039 +0000 UTC m=+94.114879898" watchObservedRunningTime="2026-01-23 
10:50:29.491953314 +0000 UTC m=+94.116633173" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.508406 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" event={"ID":"ceaf3a09-638c-4a67-ab8c-f86103f9d359","Type":"ContainerStarted","Data":"aa290598ff8d0c34e319c6e5f7590c13c2b4775f98ada9686cb641e674ce8ddb"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.512116 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.514453 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.01442994 +0000 UTC m=+94.639109869 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.535405 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" event={"ID":"da2befb5-074b-4665-b2ed-23d3f1b1df31","Type":"ContainerStarted","Data":"9dc5d0fb380b77c976471f2d8ebbac1760649b766bdf3d7ce660ac17e569d544"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.535468 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" event={"ID":"da2befb5-074b-4665-b2ed-23d3f1b1df31","Type":"ContainerStarted","Data":"c1b0cfc4cd3ce82e0fa9d46abd212d461bd7f0df52c3015e374cb02cbf009297"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.543601 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n65mf" podStartSLOduration=74.543588358 podStartE2EDuration="1m14.543588358s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.527593958 +0000 UTC m=+94.152273817" watchObservedRunningTime="2026-01-23 10:50:29.543588358 +0000 UTC m=+94.168268217" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.587579 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" event={"ID":"b68417f8-3be8-499d-941c-f2b885b3a467","Type":"ContainerStarted","Data":"9a832b5010d70cb0c82097e9aaa3ea44c7d642cb9323b75bea674e18febbf11f"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.591512 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-49rnx" podStartSLOduration=74.591499007 podStartE2EDuration="1m14.591499007s" 
podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.589103836 +0000 UTC m=+94.213783695" watchObservedRunningTime="2026-01-23 10:50:29.591499007 +0000 UTC m=+94.216178866" Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.614851 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.616162 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.116135819 +0000 UTC m=+94.740815678 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.627442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qvgvs" event={"ID":"c7b42454-f36b-4ab0-86d0-a2decba67e28","Type":"ContainerStarted","Data":"a7012d420ee0f260434af4fb872249a22e20f5d91f87e6793846ecefb4818c06"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.638315 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" event={"ID":"bd5cf083-96b6-4cc8-818a-ca50c4689165","Type":"ContainerStarted","Data":"1a306fdbf86bcf5f99cf4058f2e883d470b0b9f90207ebe425e9016d54fd7cae"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.671283 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"0133f0cf1973c81f74110e5c344e35ac2a5efe93bb140e3a2bd9b52e73d1f92a"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.671322 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" event={"ID":"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b","Type":"ContainerStarted","Data":"b6e18d055c09e2cc031f958d233d08684b7435fb4470cf6b0724d1ceab91d070"} Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.676866 4689 generic.go:334] "Generic (PLEG): container finished" podID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerID="bf8fe9d4568c24afb8f605a823a93d16516fe9d6f35d07742e6c11f0a80b9d2b" exitCode=0 Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.677950 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" event={"ID":"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3","Type":"ContainerDied","Data":"bf8fe9d4568c24afb8f605a823a93d16516fe9d6f35d07742e6c11f0a80b9d2b"} Jan 23 10:50:29 
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.687141 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" podStartSLOduration=74.68712102 podStartE2EDuration="1m14.68712102s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.634331535 +0000 UTC m=+94.259011384" watchObservedRunningTime="2026-01-23 10:50:29.68712102 +0000 UTC m=+94.311800879"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.696380 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.717627 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.718972 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.218950096 +0000 UTC m=+94.843629955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.722696 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" podStartSLOduration=74.722676171 podStartE2EDuration="1m14.722676171s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.686595415 +0000 UTC m=+94.311275274" watchObservedRunningTime="2026-01-23 10:50:29.722676171 +0000 UTC m=+94.347356030"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.739176 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" podStartSLOduration=74.739133993 podStartE2EDuration="1m14.739133993s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.726351846 +0000 UTC m=+94.351031705" watchObservedRunningTime="2026-01-23 10:50:29.739133993 +0000 UTC m=+94.363813852"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.763593 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" podStartSLOduration=74.76357205 podStartE2EDuration="1m14.76357205s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.754469827 +0000 UTC m=+94.379149686" watchObservedRunningTime="2026-01-23 10:50:29.76357205 +0000 UTC m=+94.388251899"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.789010 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-qvgvs" podStartSLOduration=74.788993632 podStartE2EDuration="1m14.788993632s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.782588868 +0000 UTC m=+94.407268737" watchObservedRunningTime="2026-01-23 10:50:29.788993632 +0000 UTC m=+94.413673491"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.826362 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.826781 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.326768801 +0000 UTC m=+94.951448660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.861262 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" podStartSLOduration=74.861240525 podStartE2EDuration="1m14.861240525s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:29.845885901 +0000 UTC m=+94.470565760" watchObservedRunningTime="2026-01-23 10:50:29.861240525 +0000 UTC m=+94.485920384"
Jan 23 10:50:29 crc kubenswrapper[4689]: I0123 10:50:29.932203 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:29 crc kubenswrapper[4689]: E0123 10:50:29.932636 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.432620435 +0000 UTC m=+95.057300294 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.033894 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.034678 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.534662292 +0000 UTC m=+95.159342151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.135673 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.135872 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.635846467 +0000 UTC m=+95.260526326 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.135987 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.136323 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.63631171 +0000 UTC m=+95.260991569 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.237246 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.237409 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.737384542 +0000 UTC m=+95.362064401 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.237511 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.237814 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.737806902 +0000 UTC m=+95.362486761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.245481 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.252049 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 23 10:50:30 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld Jan 23 10:50:30 crc kubenswrapper[4689]: [+]process-running ok Jan 23 10:50:30 crc kubenswrapper[4689]: healthz check failed Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.252115 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.338692 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.338894 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.838868204 +0000 UTC m=+95.463548063 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.338965 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.339342 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.839325536 +0000 UTC m=+95.464005395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.439945 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.440139 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.940114031 +0000 UTC m=+95.564793890 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.440232 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.440543 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:30.940534491 +0000 UTC m=+95.565214400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.541188 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.541396 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.041365717 +0000 UTC m=+95.666045576 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.541869 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.542236 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.042211578 +0000 UTC m=+95.666891437 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.643161 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.643340 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.143315712 +0000 UTC m=+95.767995571 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.643493 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.643803 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.143791054 +0000 UTC m=+95.768470913 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.683167 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gwfkj" event={"ID":"b68417f8-3be8-499d-941c-f2b885b3a467","Type":"ContainerStarted","Data":"7af2783ffb6ceac18a267e95b9b3daafea800823515da6df89a7e263cad55f6b"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.689242 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" event={"ID":"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3","Type":"ContainerStarted","Data":"2cc243468ad8665b4783027e70c09b12e489c4842e28679a5bcbefdbef595e4b"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.689992 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.698867 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" event={"ID":"f9dd4a75-d3a3-42d6-a305-43e95e450611","Type":"ContainerStarted","Data":"3de974a3877b9a09e6527379e5e4d1e09cdb486c9d914572bfd81537fbde208f"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.699112 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.701688 4689 generic.go:334] "Generic (PLEG): container finished" podID="c83ad550-7576-440f-bac5-8308b6c801b0" containerID="9150f9d267f2d3d18e29021e4b3549dfc8d5cdc74e9f7f595819631794282cf0" exitCode=0 Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.701730 4689 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" event={"ID":"c83ad550-7576-440f-bac5-8308b6c801b0","Type":"ContainerDied","Data":"9150f9d267f2d3d18e29021e4b3549dfc8d5cdc74e9f7f595819631794282cf0"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.707960 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" event={"ID":"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47","Type":"ContainerStarted","Data":"79d0cfd699eb3ed774e82aaad63ecee2808b8c7c528a9b4d0995688606bd8ca1"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.708002 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" event={"ID":"dc66c81d-1ea4-494e-99d4-1fbdb64a4a47","Type":"ContainerStarted","Data":"2d90ed3763d0bc71b32bb1c263e401c26b566a9c2a11f4b2988bc10f8f08351f"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.712701 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6lzwm" event={"ID":"bd5cf083-96b6-4cc8-818a-ca50c4689165","Type":"ContainerStarted","Data":"f8912f00e08fdf071987b2d546aca59dc052d67c16d47b17958bc62168bc7915"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.717341 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.723523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-bcppn" event={"ID":"7e63a4b4-9425-4409-9b0b-291e771cb38a","Type":"ContainerStarted","Data":"7cd27b48e99441cfc4440b90397ef6bb1af52ae1d58d50a48064601417739540"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.728994 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jt8ww" event={"ID":"babc1ea3-3d75-429d-b5a1-c966c652d827","Type":"ContainerStarted","Data":"b75468fa1b7c09a39c3a9d50b47ac604b4d87af1a38abc133de89e007a3d1795"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.731026 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podStartSLOduration=76.7310075 podStartE2EDuration="1m16.7310075s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:30.730481057 +0000 UTC m=+95.355160916" watchObservedRunningTime="2026-01-23 10:50:30.7310075 +0000 UTC m=+95.355687359" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.738046 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" event={"ID":"05112afa-e07a-4342-9d37-2a6b9b7ac9a2","Type":"ContainerStarted","Data":"1b8fe7fddd431a4079b6f9bad312ef0692f0e20d9c62fbb2b4e3b12a5745c51e"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.738110 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" event={"ID":"05112afa-e07a-4342-9d37-2a6b9b7ac9a2","Type":"ContainerStarted","Data":"4fe57f88d86492f68db0cfc7e885d557c868f6d7919184a255ace6e530fc91d4"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.743647 4689 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" event={"ID":"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b","Type":"ContainerStarted","Data":"0ecc75bcdc6958184600aa04e1303670dfbb4d9b5ed4bf60da9b0212e8ac3240"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.743690 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-dtkdv" event={"ID":"cab4ae0f-8813-48bc-b9a7-0bfb40776d0b","Type":"ContainerStarted","Data":"05848c5590225c8007cda8674fa4dd9acb1c8cba8097fb0fa0ba3e5116d8b0cb"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.744381 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.745458 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.245439061 +0000 UTC m=+95.870118920 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.762401 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z8tdh" event={"ID":"08fe053a-867d-498a-a459-96916291511e","Type":"ContainerStarted","Data":"2a0c6849651ec6d50da90566ef73f3ed3545120a227a3de69727245852031601"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.762444 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-z8tdh" event={"ID":"08fe053a-867d-498a-a459-96916291511e","Type":"ContainerStarted","Data":"deb8d56286af9788334b57087a57b552a23584d3c6cd6cb26718e2729f47881c"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.762982 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.775380 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-945fj" event={"ID":"da2befb5-074b-4665-b2ed-23d3f1b1df31","Type":"ContainerStarted","Data":"13f7d82387ad9e7ee7b9a3445b7f979af53a62f98a9338d399a022349e366bbf"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.778882 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"e295e7e376182811e89576ca1ac9bee6723a6ed06794e2edfdf3a7247ce6c5d8"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.778917 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" 
event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"3eadc0f88cda2375616ec2db9c2d200b9461f716dcd8caaf83f49b0f88e38114"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.784575 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rkx2p" event={"ID":"ceaf3a09-638c-4a67-ab8c-f86103f9d359","Type":"ContainerStarted","Data":"fa15d551f14dc75ec537881fb3f31048baae0d38cab894f2190a142c5d5bf9c7"} Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.789980 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.790034 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.791936 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.817340 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.847314 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.851117 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.351100541 +0000 UTC m=+95.975780490 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.860006 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-pmksl" podStartSLOduration=75.859987108 podStartE2EDuration="1m15.859987108s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:30.857615128 +0000 UTC m=+95.482294987" watchObservedRunningTime="2026-01-23 10:50:30.859987108 +0000 UTC m=+95.484666967" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.860911 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podStartSLOduration=75.860719417 podStartE2EDuration="1m15.860719417s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:30.810463678 +0000 UTC m=+95.435143567" watchObservedRunningTime="2026-01-23 10:50:30.860719417 +0000 UTC m=+95.485399266" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.904702 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.921897 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-z8tdh" podStartSLOduration=8.921875076 podStartE2EDuration="8.921875076s" podCreationTimestamp="2026-01-23 10:50:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:30.921707902 +0000 UTC m=+95.546387761" watchObservedRunningTime="2026-01-23 10:50:30.921875076 +0000 UTC m=+95.546554945" Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.949962 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:30 crc kubenswrapper[4689]: E0123 10:50:30.950392 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.450372227 +0000 UTC m=+96.075052096 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:30 crc kubenswrapper[4689]: I0123 10:50:30.989119 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" podStartSLOduration=76.98909929 podStartE2EDuration="1m16.98909929s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:30.958792683 +0000 UTC m=+95.583472542" watchObservedRunningTime="2026-01-23 10:50:30.98909929 +0000 UTC m=+95.613779149" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.051141 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.051493 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.551479939 +0000 UTC m=+96.176159798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.138108 4689 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.152420 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.152844 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.652825209 +0000 UTC m=+96.277505078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.249762 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 23 10:50:31 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld Jan 23 10:50:31 crc kubenswrapper[4689]: [+]process-running ok Jan 23 10:50:31 crc kubenswrapper[4689]: healthz check failed Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.249819 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.254086 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.254441 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.754429624 +0000 UTC m=+96.379109473 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.355335 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.355477 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.855451295 +0000 UTC m=+96.480131154 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.355717 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.356111 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.856094652 +0000 UTC m=+96.480774511 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.456206 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vdzr6"] Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.456666 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.456841 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.956814345 +0000 UTC m=+96.581494204 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.457019 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.457129 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.457366 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:31.957350979 +0000 UTC m=+96.582030888 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.460930 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.507523 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdzr6"] Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.558027 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.558236 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.558306 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d2kj\" (UniqueName: \"kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 
10:50:31.558325 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.558409 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:32.05839517 +0000 UTC m=+96.683075029 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.657657 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-94lj8"] Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.658672 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94lj8" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659070 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659106 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659175 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d2kj\" (UniqueName: \"kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659197 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.659471 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-23 10:50:32.159453291 +0000 UTC m=+96.784133220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659485 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.659557 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.661325 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.668472 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94lj8"] Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.682446 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d2kj\" (UniqueName: \"kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj\") pod \"community-operators-vdzr6\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.760590 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.760746 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 10:50:32.260718219 +0000 UTC m=+96.885398078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.760808 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.760835 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.761083 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8jcs\" (UniqueName: \"kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8" Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.761127 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:50:31 crc kubenswrapper[4689]: E0123 10:50:31.761445 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 10:50:32.261432447 +0000 UTC m=+96.886112306 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sbk9v" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.771758 4689 util.go:30] "No sandbox for pod can be found. 
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.811761 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"b859dce2528167c415e6815b84ddb660609b01e841a0018b5c3d844432f26392"}
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.811822 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"838b389b8cc4f32bd1171d0a66674ddae389a2d3ce6d74da48b2ec0dfd87f0d5"}
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.812626 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.812679 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.828832 4689 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-23T10:50:31.138138642Z","Handler":null,"Name":""}
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.835591 4689 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.835640 4689 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.845557 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podStartSLOduration=9.845542244 podStartE2EDuration="9.845542244s" podCreationTimestamp="2026-01-23 10:50:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:31.844377575 +0000 UTC m=+96.469057454" watchObservedRunningTime="2026-01-23 10:50:31.845542244 +0000 UTC m=+96.470222103"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.861697 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.862560 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.862693 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.862727 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.863227 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8jcs\" (UniqueName: \"kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.863564 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.863716 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.864533 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.869664 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.884959 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.893931 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8jcs\" (UniqueName: \"kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs\") pod \"certified-operators-94lj8\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.966591 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.966675 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mflsz\" (UniqueName: \"kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.966715 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.966753 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.981708 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.981748 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:31 crc kubenswrapper[4689]: I0123 10:50:31.995900 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94lj8"
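The utilities and catalog-content volumes that mount instantly in these lines are empty-dirs, and the kube-api-access-* volume is the projected service-account token; none of them need an external driver, which is why they succeed while the CSI volume is still retrying. A sketch of how such volumes are declared, using the upstream k8s.io/api/core/v1 types with names taken from the log (the projected volume's token and CA sources are omitted for brevity):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        vols := []v1.Volume{
            {Name: "utilities", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
            {Name: "catalog-content", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
            // kube-api-access-* is a projected volume (token, CA bundle, namespace).
            {Name: "kube-api-access-v8jcs", VolumeSource: v1.VolumeSource{Projected: &v1.ProjectedVolumeSource{}}},
        }
        for _, v := range vols {
            fmt.Println("would mount:", v.Name)
        }
    }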
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.075013 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mflsz\" (UniqueName: \"kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.075536 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.075610 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.076691 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.077125 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.088068 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-crlgp"]
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.089176 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.097036 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sbk9v\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") " pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.110753 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-crlgp"]
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.119846 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mflsz\" (UniqueName: \"kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz\") pod \"community-operators-j8pkz\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") " pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.139526 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.161870 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.177220 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzxd5\" (UniqueName: \"kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.177275 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.177349 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.188133 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.255089 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 10:50:32 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld
Jan 23 10:50:32 crc kubenswrapper[4689]: [+]process-running ok
Jan 23 10:50:32 crc kubenswrapper[4689]: healthz check failed
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.255133 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281331 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume\") pod \"c83ad550-7576-440f-bac5-8308b6c801b0\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") "
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281404 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume\") pod \"c83ad550-7576-440f-bac5-8308b6c801b0\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") "
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281470 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqbxk\" (UniqueName: \"kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk\") pod \"c83ad550-7576-440f-bac5-8308b6c801b0\" (UID: \"c83ad550-7576-440f-bac5-8308b6c801b0\") "
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281652 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzxd5\" (UniqueName: \"kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281685 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.281776 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.282016 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume" (OuterVolumeSpecName: "config-volume") pod "c83ad550-7576-440f-bac5-8308b6c801b0" (UID: "c83ad550-7576-440f-bac5-8308b6c801b0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.282380 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.282678 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.284752 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c83ad550-7576-440f-bac5-8308b6c801b0" (UID: "c83ad550-7576-440f-bac5-8308b6c801b0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.295226 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk" (OuterVolumeSpecName: "kube-api-access-gqbxk") pod "c83ad550-7576-440f-bac5-8308b6c801b0" (UID: "c83ad550-7576-440f-bac5-8308b6c801b0"). InnerVolumeSpecName "kube-api-access-gqbxk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.301034 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzxd5\" (UniqueName: \"kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5\") pod \"certified-operators-crlgp\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " pod="openshift-marketplace/certified-operators-crlgp"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.302709 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94lj8"]
Jan 23 10:50:32 crc kubenswrapper[4689]: W0123 10:50:32.309372 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e380655_ff7b_4f59_92ee_53074cc1b4ca.slice/crio-fe774a8cf5a5a32b2eff3ef50dcaceb27906dd75dcf9ea162a76c4be8e8f0913 WatchSource:0}: Error finding container fe774a8cf5a5a32b2eff3ef50dcaceb27906dd75dcf9ea162a76c4be8e8f0913: Status 404 returned error can't find the container with id fe774a8cf5a5a32b2eff3ef50dcaceb27906dd75dcf9ea162a76c4be8e8f0913
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.382985 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c83ad550-7576-440f-bac5-8308b6c801b0-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.383016 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c83ad550-7576-440f-bac5-8308b6c801b0-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.383026 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqbxk\" (UniqueName: \"kubernetes.io/projected/c83ad550-7576-440f-bac5-8308b6c801b0-kube-api-access-gqbxk\") on node \"crc\" DevicePath \"\""
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.436432 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-crlgp"
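The manager.go:1169 warnings are a benign create-then-inspect race: the cgroup watch event fires before the new crio-* container is fully visible, the lookup 404s once, and a later event finds it. A generic sketch of tolerating that kind of transient not-found error; inspect and the container ID are hypothetical stand-ins, not kubelet APIs:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errNotFound = errors.New("Status 404: can't find the container") // stand-in for the error above

    // inspect is a hypothetical lookup that fails until the runtime has
    // finished creating the container.
    func inspect(id string, attempt int) error {
        if attempt < 2 {
            return errNotFound
        }
        return nil
    }

    func main() {
        const id = "example-container-id" // hypothetical; the real IDs are the crio-* hashes above
        for attempt := 1; attempt <= 3; attempt++ {
            if err := inspect(id, attempt); err != nil {
                fmt.Println("transient:", err)
                time.Sleep(100 * time.Millisecond) // wait for the next watch event
                continue
            }
            fmt.Println("container visible on attempt", attempt)
            return
        }
        fmt.Println("giving up")
    }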
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.445203 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdzr6"]
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.485891 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:50:32 crc kubenswrapper[4689]: W0123 10:50:32.498481 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43400493_28cb_47ff_a065_797a27a93d58.slice/crio-92a4798b38d155e18291af2f5d7fdd92f540ca697cc784eed42ec2260a1bf974 WatchSource:0}: Error finding container 92a4798b38d155e18291af2f5d7fdd92f540ca697cc784eed42ec2260a1bf974: Status 404 returned error can't find the container with id 92a4798b38d155e18291af2f5d7fdd92f540ca697cc784eed42ec2260a1bf974
Jan 23 10:50:32 crc kubenswrapper[4689]: W0123 10:50:32.500365 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39c4a693_cb3e_49c8_9515_5b11f02093e0.slice/crio-fe2845691e7ac53220aab58b9a52195236fec29ef4aeef007ba387c969df9f1c WatchSource:0}: Error finding container fe2845691e7ac53220aab58b9a52195236fec29ef4aeef007ba387c969df9f1c: Status 404 returned error can't find the container with id fe2845691e7ac53220aab58b9a52195236fec29ef4aeef007ba387c969df9f1c
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.616777 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"]
Jan 23 10:50:32 crc kubenswrapper[4689]: W0123 10:50:32.633220 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9abf0120_b5d1_4f43_871d_b73a24382940.slice/crio-45cebe81ea273c37702d1d66549c7e5b90e171c0057babb6427f85bc3667b2b3 WatchSource:0}: Error finding container 45cebe81ea273c37702d1d66549c7e5b90e171c0057babb6427f85bc3667b2b3: Status 404 returned error can't find the container with id 45cebe81ea273c37702d1d66549c7e5b90e171c0057babb6427f85bc3667b2b3
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.686806 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-crlgp"]
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.828545 4689 generic.go:334] "Generic (PLEG): container finished" podID="43400493-28cb-47ff-a065-797a27a93d58" containerID="34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0" exitCode=0
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.828622 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerDied","Data":"34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.828647 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerStarted","Data":"92a4798b38d155e18291af2f5d7fdd92f540ca697cc784eed42ec2260a1bf974"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.838691 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.846558 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerStarted","Data":"5b5c75aaba6657d2be939f1835d73eb11f37f25fe4b8470d0d3907a179a32a42"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.850059 4689 generic.go:334] "Generic (PLEG): container finished" podID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerID="bea4fa2a0212e24df053ba7bb7d573841ceb8d14ce26f577c5c105480d49e97c" exitCode=0
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.850131 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerDied","Data":"bea4fa2a0212e24df053ba7bb7d573841ceb8d14ce26f577c5c105480d49e97c"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.850185 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerStarted","Data":"fe774a8cf5a5a32b2eff3ef50dcaceb27906dd75dcf9ea162a76c4be8e8f0913"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.856840 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" event={"ID":"9abf0120-b5d1-4f43-871d-b73a24382940","Type":"ContainerStarted","Data":"45cebe81ea273c37702d1d66549c7e5b90e171c0057babb6427f85bc3667b2b3"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.858828 4689 generic.go:334] "Generic (PLEG): container finished" podID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerID="aec784f024161a346ae7dfc12c002d47bde5ff706792e104ca2a3d69228392c7" exitCode=0
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.858903 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerDied","Data":"aec784f024161a346ae7dfc12c002d47bde5ff706792e104ca2a3d69228392c7"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.858933 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerStarted","Data":"fe2845691e7ac53220aab58b9a52195236fec29ef4aeef007ba387c969df9f1c"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.863851 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.865357 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx" event={"ID":"c83ad550-7576-440f-bac5-8308b6c801b0","Type":"ContainerDied","Data":"1b86e57f0a95ec6a16c8c17bdc30462239bd9c532fd62aca399fbdd43ac37525"}
Jan 23 10:50:32 crc kubenswrapper[4689]: I0123 10:50:32.865649 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b86e57f0a95ec6a16c8c17bdc30462239bd9c532fd62aca399fbdd43ac37525"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.255444 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 10:50:33 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld
Jan 23 10:50:33 crc kubenswrapper[4689]: [+]process-running ok
Jan 23 10:50:33 crc kubenswrapper[4689]: healthz check failed
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.255516 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.451625 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"]
Jan 23 10:50:33 crc kubenswrapper[4689]: E0123 10:50:33.452106 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c83ad550-7576-440f-bac5-8308b6c801b0" containerName="collect-profiles"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.452252 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c83ad550-7576-440f-bac5-8308b6c801b0" containerName="collect-profiles"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.452421 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c83ad550-7576-440f-bac5-8308b6c801b0" containerName="collect-profiles"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.453190 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mfvh2"
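Once the collect-profiles pod is gone, the resource managers drop their per-container bookkeeping for it, which is what the RemoveStaleState and "Deleted CPUSet assignment" lines record. In the spirit of that cleanup, a toy sketch of deleting a stale per-container assignment from a map keyed by pod UID and container name; the cpuset value is made up:

    package main

    import "fmt"

    // key identifies a per-container assignment, loosely modeled on the
    // cpu_manager/memory_manager state keyed by pod UID and container name.
    type key struct {
        podUID    string
        container string
    }

    func main() {
        assignments := map[key]string{
            {podUID: "c83ad550-7576-440f-bac5-8308b6c801b0", container: "collect-profiles"}: "cpuset 0-1 (made up)",
        }
        stale := key{podUID: "c83ad550-7576-440f-bac5-8308b6c801b0", container: "collect-profiles"}
        if v, ok := assignments[stale]; ok {
            fmt.Printf("RemoveStaleState: removing container %q (%s)\n", stale.container, v)
            delete(assignments, stale) // mirrors "Deleted CPUSet assignment" above
        }
    }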
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.457091 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.466758 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.494252 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.494415 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzj22\" (UniqueName: \"kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.494444 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.533311 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"]
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.595746 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzj22\" (UniqueName: \"kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.595791 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.595816 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.871548 4689 generic.go:334] "Generic (PLEG): container finished" podID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerID="ccb2773c4d019338930b65048e47b85e19e66f6cfe01080df594624fe6c00171" exitCode=0
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.907195 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.907791 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.908039 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.908837 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.910137 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerDied","Data":"ccb2773c4d019338930b65048e47b85e19e66f6cfe01080df594624fe6c00171"}
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.910228 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" event={"ID":"9abf0120-b5d1-4f43-871d-b73a24382940","Type":"ContainerStarted","Data":"df0966327f05d6988e46dfb2c36a75b123792e5dc23bc9a146c847548a606c24"}
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.910260 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.910283 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.910393 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.923384 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzj22\" (UniqueName: \"kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22\") pod \"redhat-marketplace-mfvh2\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:50:33 crc kubenswrapper[4689]: I0123 10:50:33.935852 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" podStartSLOduration=78.935830602 podStartE2EDuration="1m18.935830602s" podCreationTimestamp="2026-01-23 10:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:33.934061897 +0000 UTC m=+98.558741756" watchObservedRunningTime="2026-01-23 10:50:33.935830602 +0000 UTC m=+98.560510461"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.000446 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.000703 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.001020 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gzll\" (UniqueName: \"kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.001124 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.012113 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/01ee7060-466f-4294-934f-3df3b9aa7afe-metrics-certs\") pod \"network-metrics-daemon-cpc6c\" (UID: \"01ee7060-466f-4294-934f-3df3b9aa7afe\") " pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.067842 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mfvh2"
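podStartSLOduration in the tracker line above is simple arithmetic: watchObservedRunningTime minus podCreationTimestamp (the image pull timestamps are zero here, so the SLO and E2E durations agree). Reproducing the image-registry number with the timestamps copied from that entry; the trailing "m=+..." monotonic reading is dropped because time.Parse does not accept it:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05 -0700 MST"
        created, err := time.Parse(layout, "2026-01-23 10:49:15 +0000 UTC")
        if err != nil {
            panic(err)
        }
        observed, err := time.Parse(layout, "2026-01-23 10:50:33.935830602 +0000 UTC")
        if err != nil {
            panic(err)
        }
        // Prints 78.935830602, the podStartSLOduration reported above.
        fmt.Printf("podStartSLOduration=%.9f\n", observed.Sub(created).Seconds())
    }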
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.101921 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.101976 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.101999 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gzll\" (UniqueName: \"kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.103930 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.103958 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.120073 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gzll\" (UniqueName: \"kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll\") pod \"redhat-marketplace-nwt56\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") " pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.170492 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cpc6c"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.250120 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 10:50:34 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld
Jan 23 10:50:34 crc kubenswrapper[4689]: [+]process-running ok
Jan 23 10:50:34 crc kubenswrapper[4689]: healthz check failed
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.250190 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.264390 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.427245 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cpc6c"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.477880 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.477932 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.478201 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.478221 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.496404 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.561538 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.562354 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
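The downloads pod is failing both its liveness and readiness probes with "connection refused": nothing is listening on 10.217.0.15:8080 yet. A kubelet HTTP probe is essentially a GET with a timeout where any transport error or a status outside roughly 2xx-3xx counts as failure; a minimal stdlib approximation (run off-cluster it will report the same connection-refused failure, since the address is pod-internal):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func probe(url string) string {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return fmt.Sprintf("failure: %v", err) // "connect: connection refused" surfaces here
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            return "success"
        }
        return fmt.Sprintf("failure: HTTP %d", resp.StatusCode)
    }

    func main() {
        fmt.Println(probe("http://10.217.0.15:8080/")) // endpoint from the probe lines above
    }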
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.562826 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.564607 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.564802 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.570130 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.739343 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-kz8dz"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.739380 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-kz8dz"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.744894 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.744953 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.764938 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.765038 4689 patch_prober.go:28] interesting pod/console-f9d7485db-kz8dz container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.765084 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kz8dz" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.770673 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.770724 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.771516 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.786908 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.787482 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"]
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.792344 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.846503 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.846695 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.846742 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.846765 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.846782 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qk4t\" (UniqueName: \"kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.847251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.866801 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.926859 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerStarted","Data":"e7fffd0ce66f791272e4532a0a68793a26a382fada3a188bf48760d398e00113"}
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.928142 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" event={"ID":"01ee7060-466f-4294-934f-3df3b9aa7afe","Type":"ContainerStarted","Data":"175bcaa999e98837ae087271f95d93bff95963d11d321e210c356191f0d2b645"}
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.930281 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerStarted","Data":"dfc3018e1d85ab93dac33dbfba98eff2df74585c401dc26253f8d2bfe52eef30"}
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.936171 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.954063 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.954139 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qk4t\" (UniqueName: \"kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.954281 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.954883 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.955919 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:34 crc kubenswrapper[4689]: I0123 10:50:34.999421 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qk4t\" (UniqueName: \"kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t\") pod \"redhat-operators-5x86b\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.051842 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"]
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.053133 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzrpf"
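Every "SyncLoop (PLEG)" line carries the same payload: pod UID, event type, and a container or sandbox ID; the catalog pods each show the expected sequence of a sandbox ContainerStarted, an extract step that dies with exitCode=0, then the serving container. A self-contained sketch of that event shape, modeled loosely on the kubelet's pleg package rather than imported from it:

    package main

    import "fmt"

    // PodLifecycleEvent mirrors the shape printed in the PLEG lines:
    // event={"ID":..., "Type":..., "Data":...}.
    type PodLifecycleEvent struct {
        ID   string // pod UID
        Type string // e.g. ContainerStarted, ContainerDied
        Data string // container or sandbox ID
    }

    func handle(e PodLifecycleEvent) {
        switch e.Type {
        case "ContainerStarted":
            fmt.Println("sync pod: container running:", e.Data)
        case "ContainerDied":
            fmt.Println("sync pod: container finished:", e.Data)
        default:
            fmt.Println("ignoring event type:", e.Type)
        }
    }

    func main() {
        handle(PodLifecycleEvent{
            ID:   "d7315448-2c12-48b1-b49d-872cb9881a73", // redhat-marketplace-nwt56, from the log
            Type: "ContainerStarted",
            Data: "e7fffd0ce66f791272e4532a0a68793a26a382fada3a188bf48760d398e00113",
        })
    }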
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.080185 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"]
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.144962 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.146386 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.157258 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.157306 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.157352 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxg4j\" (UniqueName: \"kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.157472 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5x86b"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.245627 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qvgvs"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.249208 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 10:50:35 crc kubenswrapper[4689]: [-]has-synced failed: reason withheld
Jan 23 10:50:35 crc kubenswrapper[4689]: [+]process-running ok
Jan 23 10:50:35 crc kubenswrapper[4689]: healthz check failed
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.249266 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.262355 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxg4j\" (UniqueName: \"kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.262513 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.262634 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.266499 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.277357 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.298060 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxg4j\" (UniqueName: \"kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j\") pod \"redhat-operators-nzrpf\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.383488 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzrpf"
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.634327 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 23 10:50:35 crc kubenswrapper[4689]: W0123 10:50:35.764118 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod48f84bb1_698b_4e46_8626_bef4af596325.slice/crio-f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775 WatchSource:0}: Error finding container f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775: Status 404 returned error can't find the container with id f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.920674 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"]
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.959638 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"48f84bb1-698b-4e46-8626-bef4af596325","Type":"ContainerStarted","Data":"f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775"}
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.963277 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerStarted","Data":"1b58312a5c246540dd03a5c687199402391e3522c0dfe4ffdad7a7e1a782569f"}
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.979435 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" event={"ID":"01ee7060-466f-4294-934f-3df3b9aa7afe","Type":"ContainerStarted","Data":"ede6ff883ced3e250ced265796be3905e2306ee309685ee8c611accba366d9db"}
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.979493 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cpc6c" event={"ID":"01ee7060-466f-4294-934f-3df3b9aa7afe","Type":"ContainerStarted","Data":"b6657cadba065529f93b1c6b911c31faf0f4af1d11ccc2033d2536d4067553bc"}
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.991437 4689 generic.go:334] "Generic (PLEG): container finished" podID="00567441-00cd-4594-a8bd-93db333e1a18" containerID="fa39da2fe7c6e7fa5082051f39bc80eaca6840334eefa6caf6fa46acaa688d29" exitCode=0
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.991604 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerDied","Data":"fa39da2fe7c6e7fa5082051f39bc80eaca6840334eefa6caf6fa46acaa688d29"}
Jan 23 10:50:35 crc kubenswrapper[4689]: I0123 10:50:35.995779 4689 generic.go:334] "Generic (PLEG): container finished" podID="d7315448-2c12-48b1-b49d-872cb9881a73" containerID="9ed8f4865819532d8b2fdf0b1ebb3681b4a1fa296b8e335488913a0bde10f897" exitCode=0
Jan 23 10:50:36 crc kubenswrapper[4689]: I0123 10:50:35.997042 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerDied","Data":"9ed8f4865819532d8b2fdf0b1ebb3681b4a1fa296b8e335488913a0bde10f897"}
Jan 23 10:50:36 crc kubenswrapper[4689]: I0123 10:50:36.004564 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-cpc6c" podStartSLOduration=82.004541616 podStartE2EDuration="1m22.004541616s" podCreationTimestamp="2026-01-23 10:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:35.999660671 +0000 UTC m=+100.624340550" watchObservedRunningTime="2026-01-23 10:50:36.004541616 +0000 UTC m=+100.629221485"
Jan 23 10:50:36 crc kubenswrapper[4689]: I0123 10:50:36.060777 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"]
Jan 23 10:50:36 crc kubenswrapper[4689]: W0123 10:50:36.102232 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f3d6207_0bc4_441d_bf97_406ec30d09a1.slice/crio-250e247e2d6e0947447cf6c3795ffd1464ee624034d9a8a2ceaad0bf2a9d17bc WatchSource:0}: Error finding container 250e247e2d6e0947447cf6c3795ffd1464ee624034d9a8a2ceaad0bf2a9d17bc: Status 404 returned error can't find the container with id 250e247e2d6e0947447cf6c3795ffd1464ee624034d9a8a2ceaad0bf2a9d17bc
Jan 23 10:50:36 crc kubenswrapper[4689]: I0123 10:50:36.253964 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-qvgvs"
Jan 23 10:50:36 crc kubenswrapper[4689]: I0123 10:50:36.256393 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-qvgvs"
Jan 23 10:50:36 crc kubenswrapper[4689]: E0123 10:50:36.896217 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod48f84bb1_698b_4e46_8626_bef4af596325.slice/crio-456fcb342f0929afb5b4cc3192e6148abcaefa2e11ec1b7e88cff2026d77677a.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.017590 4689 generic.go:334] "Generic (PLEG): container finished" podID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerID="d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6" exitCode=0
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.017669 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerDied","Data":"d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6"}
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.017704 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerStarted","Data":"250e247e2d6e0947447cf6c3795ffd1464ee624034d9a8a2ceaad0bf2a9d17bc"}
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.045272 4689 generic.go:334] "Generic (PLEG): container finished" podID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerID="e9164db7bd38ed676b6ee3be7b533f69350e34fd7d2579bbc3876aedb4db1524" exitCode=0
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.045343 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerDied","Data":"e9164db7bd38ed676b6ee3be7b533f69350e34fd7d2579bbc3876aedb4db1524"}
Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.069482 4689 generic.go:334] "Generic (PLEG): container finished" podID="48f84bb1-698b-4e46-8626-bef4af596325"
containerID="456fcb342f0929afb5b4cc3192e6148abcaefa2e11ec1b7e88cff2026d77677a" exitCode=0 Jan 23 10:50:37 crc kubenswrapper[4689]: I0123 10:50:37.070279 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"48f84bb1-698b-4e46-8626-bef4af596325","Type":"ContainerDied","Data":"456fcb342f0929afb5b4cc3192e6148abcaefa2e11ec1b7e88cff2026d77677a"} Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.475129 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.618701 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir\") pod \"48f84bb1-698b-4e46-8626-bef4af596325\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.618764 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access\") pod \"48f84bb1-698b-4e46-8626-bef4af596325\" (UID: \"48f84bb1-698b-4e46-8626-bef4af596325\") " Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.619404 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "48f84bb1-698b-4e46-8626-bef4af596325" (UID: "48f84bb1-698b-4e46-8626-bef4af596325"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.644319 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "48f84bb1-698b-4e46-8626-bef4af596325" (UID: "48f84bb1-698b-4e46-8626-bef4af596325"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.724029 4689 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/48f84bb1-698b-4e46-8626-bef4af596325-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 10:50:38 crc kubenswrapper[4689]: I0123 10:50:38.724065 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48f84bb1-698b-4e46-8626-bef4af596325-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 10:50:39 crc kubenswrapper[4689]: I0123 10:50:39.140586 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"48f84bb1-698b-4e46-8626-bef4af596325","Type":"ContainerDied","Data":"f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775"} Jan 23 10:50:39 crc kubenswrapper[4689]: I0123 10:50:39.140658 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f457fcd72a5bc8c9380e83386c1cd1ae62eefcff5af5d393546ea7ce6145c775" Jan 23 10:50:39 crc kubenswrapper[4689]: I0123 10:50:39.140743 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.471528 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 10:50:40 crc kubenswrapper[4689]: E0123 10:50:40.472780 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48f84bb1-698b-4e46-8626-bef4af596325" containerName="pruner" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.472797 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="48f84bb1-698b-4e46-8626-bef4af596325" containerName="pruner" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.472894 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="48f84bb1-698b-4e46-8626-bef4af596325" containerName="pruner" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.473235 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.476587 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.476897 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.487427 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.551237 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.551293 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.622841 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-z8tdh" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.652858 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.652947 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.653086 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.673462 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:40 crc kubenswrapper[4689]: I0123 10:50:40.808429 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:41 crc kubenswrapper[4689]: I0123 10:50:41.305728 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 10:50:41 crc kubenswrapper[4689]: W0123 10:50:41.315845 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod05d8033b_a234_4b54_a290_23623c596753.slice/crio-6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27 WatchSource:0}: Error finding container 6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27: Status 404 returned error can't find the container with id 6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27 Jan 23 10:50:42 crc kubenswrapper[4689]: I0123 10:50:42.175192 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"05d8033b-a234-4b54-a290-23623c596753","Type":"ContainerStarted","Data":"6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27"} Jan 23 10:50:43 crc kubenswrapper[4689]: I0123 10:50:43.187942 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"05d8033b-a234-4b54-a290-23623c596753","Type":"ContainerStarted","Data":"67a17bc11c5298517963acb7f3277c800699886ee5a4c575166581e3365a5f3c"} Jan 23 10:50:43 crc kubenswrapper[4689]: I0123 10:50:43.202988 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.20296597 podStartE2EDuration="3.20296597s" podCreationTimestamp="2026-01-23 10:50:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:50:43.202842337 +0000 UTC m=+107.827522206" watchObservedRunningTime="2026-01-23 10:50:43.20296597 +0000 UTC m=+107.827645849" Jan 23 10:50:44 crc kubenswrapper[4689]: I0123 10:50:44.195636 4689 generic.go:334] "Generic (PLEG): container finished" podID="05d8033b-a234-4b54-a290-23623c596753" containerID="67a17bc11c5298517963acb7f3277c800699886ee5a4c575166581e3365a5f3c" exitCode=0 Jan 23 10:50:44 crc kubenswrapper[4689]: I0123 10:50:44.196206 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"05d8033b-a234-4b54-a290-23623c596753","Type":"ContainerDied","Data":"67a17bc11c5298517963acb7f3277c800699886ee5a4c575166581e3365a5f3c"} Jan 23 10:50:44 crc kubenswrapper[4689]: I0123 10:50:44.478356 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 10:50:44 crc kubenswrapper[4689]: I0123 10:50:44.680290 4689 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:44 crc kubenswrapper[4689]: I0123 10:50:44.685875 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.634951 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.733414 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access\") pod \"05d8033b-a234-4b54-a290-23623c596753\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.733803 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir\") pod \"05d8033b-a234-4b54-a290-23623c596753\" (UID: \"05d8033b-a234-4b54-a290-23623c596753\") " Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.733935 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "05d8033b-a234-4b54-a290-23623c596753" (UID: "05d8033b-a234-4b54-a290-23623c596753"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.734483 4689 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/05d8033b-a234-4b54-a290-23623c596753-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.749491 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "05d8033b-a234-4b54-a290-23623c596753" (UID: "05d8033b-a234-4b54-a290-23623c596753"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:50:46 crc kubenswrapper[4689]: I0123 10:50:46.836078 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/05d8033b-a234-4b54-a290-23623c596753-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 10:50:47 crc kubenswrapper[4689]: I0123 10:50:47.214890 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"05d8033b-a234-4b54-a290-23623c596753","Type":"ContainerDied","Data":"6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27"} Jan 23 10:50:47 crc kubenswrapper[4689]: I0123 10:50:47.215061 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 10:50:47 crc kubenswrapper[4689]: I0123 10:50:47.215490 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6da541d235be1c87b98bce028fc119a163e23768cb2c30b3fe54b7268cbe1e27" Jan 23 10:50:52 crc kubenswrapper[4689]: I0123 10:50:52.167889 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" Jan 23 10:51:04 crc kubenswrapper[4689]: E0123 10:51:04.778075 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 23 10:51:04 crc kubenswrapper[4689]: E0123 10:51:04.778803 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4gzll,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-nwt56_openshift-marketplace(d7315448-2c12-48b1-b49d-872cb9881a73): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:04 crc kubenswrapper[4689]: E0123 10:51:04.780348 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-nwt56" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" Jan 23 10:51:05 crc kubenswrapper[4689]: I0123 10:51:05.253303 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 10:51:06 crc kubenswrapper[4689]: E0123 10:51:06.586386 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off 
pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-nwt56" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" Jan 23 10:51:07 crc kubenswrapper[4689]: E0123 10:51:07.105747 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 23 10:51:07 crc kubenswrapper[4689]: E0123 10:51:07.105895 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jzj22,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mfvh2_openshift-marketplace(00567441-00cd-4594-a8bd-93db333e1a18): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:07 crc kubenswrapper[4689]: E0123 10:51:07.107216 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-mfvh2" podUID="00567441-00cd-4594-a8bd-93db333e1a18" Jan 23 10:51:08 crc kubenswrapper[4689]: E0123 10:51:08.176288 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 23 10:51:08 crc kubenswrapper[4689]: E0123 10:51:08.176530 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v8jcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-94lj8_openshift-marketplace(2e380655-ff7b-4f59-92ee-53074cc1b4ca): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:08 crc kubenswrapper[4689]: E0123 10:51:08.178278 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-94lj8" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" Jan 23 10:51:10 crc kubenswrapper[4689]: I0123 10:51:10.501585 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 10:51:10 crc kubenswrapper[4689]: I0123 10:51:10.501676 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 10:51:12 crc kubenswrapper[4689]: E0123 10:51:12.305782 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mfvh2" podUID="00567441-00cd-4594-a8bd-93db333e1a18" Jan 23 10:51:12 crc kubenswrapper[4689]: E0123 10:51:12.305873 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-94lj8" 
podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" Jan 23 10:51:12 crc kubenswrapper[4689]: E0123 10:51:12.672309 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 23 10:51:12 crc kubenswrapper[4689]: E0123 10:51:12.672602 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mflsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-j8pkz_openshift-marketplace(43400493-28cb-47ff-a065-797a27a93d58): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:12 crc kubenswrapper[4689]: E0123 10:51:12.673825 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-j8pkz" podUID="43400493-28cb-47ff-a065-797a27a93d58" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.680018 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 23 10:51:14 crc kubenswrapper[4689]: E0123 10:51:14.680345 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d8033b-a234-4b54-a290-23623c596753" containerName="pruner" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.680360 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d8033b-a234-4b54-a290-23623c596753" containerName="pruner" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.680487 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d8033b-a234-4b54-a290-23623c596753" containerName="pruner" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.680938 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.684025 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.685205 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.685717 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.862853 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.862909 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.964305 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.964369 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.964450 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:14 crc kubenswrapper[4689]: I0123 10:51:14.991853 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:15 crc kubenswrapper[4689]: I0123 10:51:15.081477 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 10:51:16 crc kubenswrapper[4689]: E0123 10:51:16.298486 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 23 10:51:16 crc kubenswrapper[4689]: E0123 10:51:16.298695 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8d2kj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vdzr6_openshift-marketplace(39c4a693-cb3e-49c8-9515-5b11f02093e0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:16 crc kubenswrapper[4689]: E0123 10:51:16.299892 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vdzr6" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.074457 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.075847 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.090456 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.147089 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.147246 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.147360 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.248619 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.248777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.248852 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.248857 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.249026 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock\") pod \"installer-9-crc\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.289508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:19 crc kubenswrapper[4689]: I0123 10:51:19.410912 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:51:21 crc kubenswrapper[4689]: E0123 10:51:21.584758 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 23 10:51:21 crc kubenswrapper[4689]: E0123 10:51:21.585355 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xzxd5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-crlgp_openshift-marketplace(d580e02f-6e70-4e5b-b78a-025d90866c97): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:21 crc kubenswrapper[4689]: E0123 10:51:21.586768 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-crlgp" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.089252 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.089373 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.089411 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.089471 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.091991 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.092321 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.092327 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.101564 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.102011 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.115051 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.119314 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.119666 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.177556 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.186256 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 10:51:22 crc kubenswrapper[4689]: I0123 10:51:22.354437 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.113060 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-crlgp" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.114481 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vdzr6" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.145393 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.145553 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sxg4j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-nzrpf_openshift-marketplace(90aea647-917c-4e24-ada7-0b2ee683bd4f): 
ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.146866 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-nzrpf" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.153665 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.153815 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7qk4t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5x86b_openshift-marketplace(3f3d6207-0bc4-441d-bf97-406ec30d09a1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.155035 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5x86b" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.420528 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5x86b" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" 
Jan 23 10:51:23 crc kubenswrapper[4689]: E0123 10:51:23.421326 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-nzrpf" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f"
Jan 23 10:51:23 crc kubenswrapper[4689]: I0123 10:51:23.574623 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 23 10:51:23 crc kubenswrapper[4689]: W0123 10:51:23.587605 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd3a158bc_5105_4b88_bc04_1c765e6b9206.slice/crio-1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a WatchSource:0}: Error finding container 1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a: Status 404 returned error can't find the container with id 1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a
Jan 23 10:51:23 crc kubenswrapper[4689]: W0123 10:51:23.734716 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-52dd32fe60da3fd41dfbce7f54ab8c5dfd8504c721163574c3cf748e0632548d WatchSource:0}: Error finding container 52dd32fe60da3fd41dfbce7f54ab8c5dfd8504c721163574c3cf748e0632548d: Status 404 returned error can't find the container with id 52dd32fe60da3fd41dfbce7f54ab8c5dfd8504c721163574c3cf748e0632548d
Jan 23 10:51:23 crc kubenswrapper[4689]: I0123 10:51:23.827894 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 23 10:51:23 crc kubenswrapper[4689]: W0123 10:51:23.850726 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7863a55a_f25f_4385_b0d7_9b8f88361e3d.slice/crio-4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce WatchSource:0}: Error finding container 4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce: Status 404 returned error can't find the container with id 4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.426516 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b18bbc37ee3b9539059d3ae09541e8502bd6a51031b3e823919c8dbd1effb4db"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.426960 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"52dd32fe60da3fd41dfbce7f54ab8c5dfd8504c721163574c3cf748e0632548d"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.429337 4689 generic.go:334] "Generic (PLEG): container finished" podID="43400493-28cb-47ff-a065-797a27a93d58" containerID="7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088" exitCode=0
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.429421 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerDied","Data":"7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.431554 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7863a55a-f25f-4385-b0d7-9b8f88361e3d","Type":"ContainerStarted","Data":"51a9c7b15371158d6709648f927a1ad1cf2d8e31f7e32613715b92772739dc92"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.431595 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7863a55a-f25f-4385-b0d7-9b8f88361e3d","Type":"ContainerStarted","Data":"4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.436191 4689 generic.go:334] "Generic (PLEG): container finished" podID="d7315448-2c12-48b1-b49d-872cb9881a73" containerID="db34b311eb610c45012426c4b2cb8775d82c34d41b514b1c7f88ae61e1e32469" exitCode=0
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.436263 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerDied","Data":"db34b311eb610c45012426c4b2cb8775d82c34d41b514b1c7f88ae61e1e32469"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.438179 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d3a158bc-5105-4b88-bc04-1c765e6b9206","Type":"ContainerStarted","Data":"b07b63b046838e8c51702ebd5d66ce335db34591bbd3d9910c1dd775dd755e37"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.438208 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d3a158bc-5105-4b88-bc04-1c765e6b9206","Type":"ContainerStarted","Data":"1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.440582 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"50a2745a312e348d2a11b90c6d54e6a1ec57f4cb44bdbd896b42a6f22196b84c"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.440606 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a392dc71454abe582bf19d60241e4f31b4d4b917a1e1c2406946a4afc994a263"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.441020 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.450296 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3a4a2c12d83dda6ae08f7e93dbdcd2e9ce8afff5b1bda6b2e23ab49451119cdc"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.450378 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"568f9f71306252c7921b895c479ef2d3d1484a2c879c6929ca9a6b9d9f0e97a9"}
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.471897 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=10.471877055 podStartE2EDuration="10.471877055s" podCreationTimestamp="2026-01-23 10:51:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:51:24.471236697 +0000 UTC m=+149.095916566" watchObservedRunningTime="2026-01-23 10:51:24.471877055 +0000 UTC m=+149.096556934"
Jan 23 10:51:24 crc kubenswrapper[4689]: I0123 10:51:24.652849 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=5.652831998 podStartE2EDuration="5.652831998s" podCreationTimestamp="2026-01-23 10:51:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:51:24.59140469 +0000 UTC m=+149.216084549" watchObservedRunningTime="2026-01-23 10:51:24.652831998 +0000 UTC m=+149.277511857"
Jan 23 10:51:25 crc kubenswrapper[4689]: I0123 10:51:25.465562 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerStarted","Data":"199114f98bad9852b5e0a763b8d2e54ceb8300c0f0d4ecea21d4ce80193e2d34"}
Jan 23 10:51:25 crc kubenswrapper[4689]: I0123 10:51:25.468188 4689 generic.go:334] "Generic (PLEG): container finished" podID="d3a158bc-5105-4b88-bc04-1c765e6b9206" containerID="b07b63b046838e8c51702ebd5d66ce335db34591bbd3d9910c1dd775dd755e37" exitCode=0
Jan 23 10:51:25 crc kubenswrapper[4689]: I0123 10:51:25.468309 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d3a158bc-5105-4b88-bc04-1c765e6b9206","Type":"ContainerDied","Data":"b07b63b046838e8c51702ebd5d66ce335db34591bbd3d9910c1dd775dd755e37"}
Jan 23 10:51:25 crc kubenswrapper[4689]: I0123 10:51:25.473076 4689 generic.go:334] "Generic (PLEG): container finished" podID="00567441-00cd-4594-a8bd-93db333e1a18" containerID="fc5c22c91d3a058788fd507405638de2d32f3e0c18f7b3116120c77eabefb36a" exitCode=0
Jan 23 10:51:25 crc kubenswrapper[4689]: I0123 10:51:25.473211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerDied","Data":"fc5c22c91d3a058788fd507405638de2d32f3e0c18f7b3116120c77eabefb36a"}
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.482006 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerStarted","Data":"b19ae6926d94f13787da82c56ac4bea84c7498228fde59e41ac700647ada6f7e"}
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.484984 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerStarted","Data":"7f8be17f40dc7410e04d55c469bd0fb2ffa23516f37cee5b16992b4aec643c92"}
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.486914 4689 generic.go:334] "Generic (PLEG): container finished" podID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerID="199114f98bad9852b5e0a763b8d2e54ceb8300c0f0d4ecea21d4ce80193e2d34" exitCode=0
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.486970 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerDied","Data":"199114f98bad9852b5e0a763b8d2e54ceb8300c0f0d4ecea21d4ce80193e2d34"}
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.489281 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerStarted","Data":"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"}
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.505594 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mfvh2" podStartSLOduration=3.685484818 podStartE2EDuration="53.505578031s" podCreationTimestamp="2026-01-23 10:50:33 +0000 UTC" firstStartedPulling="2026-01-23 10:50:36.024769315 +0000 UTC m=+100.649449174" lastFinishedPulling="2026-01-23 10:51:25.844862528 +0000 UTC m=+150.469542387" observedRunningTime="2026-01-23 10:51:26.504095531 +0000 UTC m=+151.128775400" watchObservedRunningTime="2026-01-23 10:51:26.505578031 +0000 UTC m=+151.130257890"
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.548512 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nwt56" podStartSLOduration=3.632212669 podStartE2EDuration="53.548497997s" podCreationTimestamp="2026-01-23 10:50:33 +0000 UTC" firstStartedPulling="2026-01-23 10:50:36.019401677 +0000 UTC m=+100.644081536" lastFinishedPulling="2026-01-23 10:51:25.935687005 +0000 UTC m=+150.560366864" observedRunningTime="2026-01-23 10:51:26.54791265 +0000 UTC m=+151.172592519" watchObservedRunningTime="2026-01-23 10:51:26.548497997 +0000 UTC m=+151.173177856"
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.570289 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j8pkz" podStartSLOduration=2.489406924 podStartE2EDuration="55.570269747s" podCreationTimestamp="2026-01-23 10:50:31 +0000 UTC" firstStartedPulling="2026-01-23 10:50:32.838427917 +0000 UTC m=+97.463107776" lastFinishedPulling="2026-01-23 10:51:25.91929074 +0000 UTC m=+150.543970599" observedRunningTime="2026-01-23 10:51:26.568252803 +0000 UTC m=+151.192932662" watchObservedRunningTime="2026-01-23 10:51:26.570269747 +0000 UTC m=+151.194949606"
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.741024 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.864323 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access\") pod \"d3a158bc-5105-4b88-bc04-1c765e6b9206\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") "
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.864402 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir\") pod \"d3a158bc-5105-4b88-bc04-1c765e6b9206\" (UID: \"d3a158bc-5105-4b88-bc04-1c765e6b9206\") "
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.864492 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d3a158bc-5105-4b88-bc04-1c765e6b9206" (UID: "d3a158bc-5105-4b88-bc04-1c765e6b9206"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.864988 4689 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d3a158bc-5105-4b88-bc04-1c765e6b9206-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.871006 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d3a158bc-5105-4b88-bc04-1c765e6b9206" (UID: "d3a158bc-5105-4b88-bc04-1c765e6b9206"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:51:26 crc kubenswrapper[4689]: I0123 10:51:26.966641 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d3a158bc-5105-4b88-bc04-1c765e6b9206-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:27 crc kubenswrapper[4689]: I0123 10:51:27.502194 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 23 10:51:27 crc kubenswrapper[4689]: I0123 10:51:27.502616 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d3a158bc-5105-4b88-bc04-1c765e6b9206","Type":"ContainerDied","Data":"1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a"}
Jan 23 10:51:27 crc kubenswrapper[4689]: I0123 10:51:27.502658 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d9354d7ca6fb6b7dcd8b84ed5dd368ca9cab0e3859d75cccea28f0ba129568a"
Jan 23 10:51:27 crc kubenswrapper[4689]: I0123 10:51:27.512875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerStarted","Data":"ba2e53c279f7b1262c3fc70bc1828219dd9161c285d59a0040469c0e4adfa5e8"}
Jan 23 10:51:27 crc kubenswrapper[4689]: I0123 10:51:27.529760 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-94lj8" podStartSLOduration=2.458686105 podStartE2EDuration="56.529738483s" podCreationTimestamp="2026-01-23 10:50:31 +0000 UTC" firstStartedPulling="2026-01-23 10:50:32.856018529 +0000 UTC m=+97.480698388" lastFinishedPulling="2026-01-23 10:51:26.927070907 +0000 UTC m=+151.551750766" observedRunningTime="2026-01-23 10:51:27.52744456 +0000 UTC m=+152.152124439" watchObservedRunningTime="2026-01-23 10:51:27.529738483 +0000 UTC m=+152.154418342"
Jan 23 10:51:31 crc kubenswrapper[4689]: I0123 10:51:31.996747 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:51:31 crc kubenswrapper[4689]: I0123 10:51:31.997477 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.189047 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.189430 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.189543 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.248387 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.577104 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:51:32 crc kubenswrapper[4689]: I0123 10:51:32.584680 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:33 crc kubenswrapper[4689]: I0123 10:51:33.260300 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:51:33 crc kubenswrapper[4689]: I0123 10:51:33.311239 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 10:51:33 crc kubenswrapper[4689]: I0123 10:51:33.311319 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.068097 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.068519 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.118230 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.264760 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.265050 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.326253 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.563252 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j8pkz" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="registry-server" containerID="cri-o://c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801" gracePeriod=2
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.598078 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:34 crc kubenswrapper[4689]: I0123 10:51:34.598932 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.144920 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.283753 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mflsz\" (UniqueName: \"kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz\") pod \"43400493-28cb-47ff-a065-797a27a93d58\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") "
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.283857 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content\") pod \"43400493-28cb-47ff-a065-797a27a93d58\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") "
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.283914 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities\") pod \"43400493-28cb-47ff-a065-797a27a93d58\" (UID: \"43400493-28cb-47ff-a065-797a27a93d58\") "
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.284839 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities" (OuterVolumeSpecName: "utilities") pod "43400493-28cb-47ff-a065-797a27a93d58" (UID: "43400493-28cb-47ff-a065-797a27a93d58"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.288885 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz" (OuterVolumeSpecName: "kube-api-access-mflsz") pod "43400493-28cb-47ff-a065-797a27a93d58" (UID: "43400493-28cb-47ff-a065-797a27a93d58"). InnerVolumeSpecName "kube-api-access-mflsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.334480 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43400493-28cb-47ff-a065-797a27a93d58" (UID: "43400493-28cb-47ff-a065-797a27a93d58"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.385671 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mflsz\" (UniqueName: \"kubernetes.io/projected/43400493-28cb-47ff-a065-797a27a93d58-kube-api-access-mflsz\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.385707 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.385720 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43400493-28cb-47ff-a065-797a27a93d58-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.571528 4689 generic.go:334] "Generic (PLEG): container finished" podID="43400493-28cb-47ff-a065-797a27a93d58" containerID="c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801" exitCode=0
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.571610 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8pkz"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.571614 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerDied","Data":"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"}
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.571662 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8pkz" event={"ID":"43400493-28cb-47ff-a065-797a27a93d58","Type":"ContainerDied","Data":"92a4798b38d155e18291af2f5d7fdd92f540ca697cc784eed42ec2260a1bf974"}
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.571684 4689 scope.go:117] "RemoveContainer" containerID="c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.595558 4689 scope.go:117] "RemoveContainer" containerID="7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.599643 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.602036 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j8pkz"]
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.626297 4689 scope.go:117] "RemoveContainer" containerID="34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.639671 4689 scope.go:117] "RemoveContainer" containerID="c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"
Jan 23 10:51:35 crc kubenswrapper[4689]: E0123 10:51:35.642691 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801\": container with ID starting with c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801 not found: ID does not exist" containerID="c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.642751 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801"} err="failed to get container status \"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801\": rpc error: code = NotFound desc = could not find container \"c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801\": container with ID starting with c6b5db0e83e08248086e978ca28015ae9f52844c231a7a3f328040e0ef211801 not found: ID does not exist"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.642816 4689 scope.go:117] "RemoveContainer" containerID="7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088"
Jan 23 10:51:35 crc kubenswrapper[4689]: E0123 10:51:35.643202 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088\": container with ID starting with 7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088 not found: ID does not exist" containerID="7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.643226 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088"} err="failed to get container status \"7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088\": rpc error: code = NotFound desc = could not find container \"7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088\": container with ID starting with 7c02b942e37acf0b7eb21d7aae477a6d461332a16a3c6bd69f2eba3d02133088 not found: ID does not exist"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.643239 4689 scope.go:117] "RemoveContainer" containerID="34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0"
Jan 23 10:51:35 crc kubenswrapper[4689]: E0123 10:51:35.645487 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0\": container with ID starting with 34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0 not found: ID does not exist" containerID="34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.645518 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0"} err="failed to get container status \"34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0\": rpc error: code = NotFound desc = could not find container \"34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0\": container with ID starting with 34fe31116b604d7a854a801c5fc79ad65fbef6b5fc825b85643ef22b43c12ed0 not found: ID does not exist"
Jan 23 10:51:35 crc kubenswrapper[4689]: I0123 10:51:35.647813 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43400493-28cb-47ff-a065-797a27a93d58" path="/var/lib/kubelet/pods/43400493-28cb-47ff-a065-797a27a93d58/volumes"
Jan 23 10:51:37 crc kubenswrapper[4689]: I0123 10:51:37.654400 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:51:37 crc kubenswrapper[4689]: I0123 10:51:37.654927 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nwt56" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="registry-server" containerID="cri-o://7f8be17f40dc7410e04d55c469bd0fb2ffa23516f37cee5b16992b4aec643c92" gracePeriod=2
Jan 23 10:51:38 crc kubenswrapper[4689]: I0123 10:51:38.589057 4689 generic.go:334] "Generic (PLEG): container finished" podID="d7315448-2c12-48b1-b49d-872cb9881a73" containerID="7f8be17f40dc7410e04d55c469bd0fb2ffa23516f37cee5b16992b4aec643c92" exitCode=0
Jan 23 10:51:38 crc kubenswrapper[4689]: I0123 10:51:38.589100 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerDied","Data":"7f8be17f40dc7410e04d55c469bd0fb2ffa23516f37cee5b16992b4aec643c92"}
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.095050 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.251431 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities\") pod \"d7315448-2c12-48b1-b49d-872cb9881a73\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") "
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.251582 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gzll\" (UniqueName: \"kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll\") pod \"d7315448-2c12-48b1-b49d-872cb9881a73\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") "
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.252545 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities" (OuterVolumeSpecName: "utilities") pod "d7315448-2c12-48b1-b49d-872cb9881a73" (UID: "d7315448-2c12-48b1-b49d-872cb9881a73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.252722 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content\") pod \"d7315448-2c12-48b1-b49d-872cb9881a73\" (UID: \"d7315448-2c12-48b1-b49d-872cb9881a73\") "
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.253000 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.274636 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll" (OuterVolumeSpecName: "kube-api-access-4gzll") pod "d7315448-2c12-48b1-b49d-872cb9881a73" (UID: "d7315448-2c12-48b1-b49d-872cb9881a73"). InnerVolumeSpecName "kube-api-access-4gzll". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.285420 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7315448-2c12-48b1-b49d-872cb9881a73" (UID: "d7315448-2c12-48b1-b49d-872cb9881a73"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.353985 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gzll\" (UniqueName: \"kubernetes.io/projected/d7315448-2c12-48b1-b49d-872cb9881a73-kube-api-access-4gzll\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.354020 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7315448-2c12-48b1-b49d-872cb9881a73-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.602367 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwt56"
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.602381 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwt56" event={"ID":"d7315448-2c12-48b1-b49d-872cb9881a73","Type":"ContainerDied","Data":"e7fffd0ce66f791272e4532a0a68793a26a382fada3a188bf48760d398e00113"}
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.602438 4689 scope.go:117] "RemoveContainer" containerID="7f8be17f40dc7410e04d55c469bd0fb2ffa23516f37cee5b16992b4aec643c92"
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.603870 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerStarted","Data":"c9299ca8ae4da06baaecfcf242ab1589ddb2a21f0e13ee86b36786ee4ed31ebc"}
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.632330 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.634646 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwt56"]
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.775928 4689 scope.go:117] "RemoveContainer" containerID="db34b311eb610c45012426c4b2cb8775d82c34d41b514b1c7f88ae61e1e32469"
Jan 23 10:51:40 crc kubenswrapper[4689]: I0123 10:51:40.810436 4689 scope.go:117] "RemoveContainer" containerID="9ed8f4865819532d8b2fdf0b1ebb3681b4a1fa296b8e335488913a0bde10f897"
Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.611925 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerStarted","Data":"ba95d4ffbbedc8db82e0a2a1cd4e513a75e6ed25ee9f8bee2695fadb2370ab45"}
Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.614536 4689 generic.go:334] "Generic (PLEG): container finished" podID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerID="aef6eb1372f25bb71c2ee5c5d0de7431eaf22aa07fb21e65cb9a492ca4371d9e" exitCode=0
Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.614601 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerDied","Data":"aef6eb1372f25bb71c2ee5c5d0de7431eaf22aa07fb21e65cb9a492ca4371d9e"}
event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerDied","Data":"aef6eb1372f25bb71c2ee5c5d0de7431eaf22aa07fb21e65cb9a492ca4371d9e"} Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.617325 4689 generic.go:334] "Generic (PLEG): container finished" podID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerID="312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752" exitCode=0 Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.617355 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerDied","Data":"312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752"} Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.620689 4689 generic.go:334] "Generic (PLEG): container finished" podID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerID="c9299ca8ae4da06baaecfcf242ab1589ddb2a21f0e13ee86b36786ee4ed31ebc" exitCode=0 Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.620729 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerDied","Data":"c9299ca8ae4da06baaecfcf242ab1589ddb2a21f0e13ee86b36786ee4ed31ebc"} Jan 23 10:51:41 crc kubenswrapper[4689]: I0123 10:51:41.646158 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" path="/var/lib/kubelet/pods/d7315448-2c12-48b1-b49d-872cb9881a73/volumes" Jan 23 10:51:42 crc kubenswrapper[4689]: I0123 10:51:42.627296 4689 generic.go:334] "Generic (PLEG): container finished" podID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerID="ba95d4ffbbedc8db82e0a2a1cd4e513a75e6ed25ee9f8bee2695fadb2370ab45" exitCode=0 Jan 23 10:51:42 crc kubenswrapper[4689]: I0123 10:51:42.627377 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerDied","Data":"ba95d4ffbbedc8db82e0a2a1cd4e513a75e6ed25ee9f8bee2695fadb2370ab45"} Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.007184 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwhbm"] Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.633947 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerStarted","Data":"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e"} Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.635895 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerStarted","Data":"30754005cc74e1172714ebdcace87dcffc582c8cb52cba88ac5e5106982b30b8"} Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.646922 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerStarted","Data":"417850f4688835e0859140bf9217128c8aac8e5833f730b64e96371ab038128d"} Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.657401 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5x86b" podStartSLOduration=3.6192765 podStartE2EDuration="1m9.657385221s" 
podCreationTimestamp="2026-01-23 10:50:34 +0000 UTC" firstStartedPulling="2026-01-23 10:50:37.020671355 +0000 UTC m=+101.645351214" lastFinishedPulling="2026-01-23 10:51:43.058780076 +0000 UTC m=+167.683459935" observedRunningTime="2026-01-23 10:51:43.654521153 +0000 UTC m=+168.279201012" watchObservedRunningTime="2026-01-23 10:51:43.657385221 +0000 UTC m=+168.282065070" Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.672744 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vdzr6" podStartSLOduration=3.339357673 podStartE2EDuration="1m12.672729307s" podCreationTimestamp="2026-01-23 10:50:31 +0000 UTC" firstStartedPulling="2026-01-23 10:50:32.860119134 +0000 UTC m=+97.484798983" lastFinishedPulling="2026-01-23 10:51:42.193490758 +0000 UTC m=+166.818170617" observedRunningTime="2026-01-23 10:51:43.671953687 +0000 UTC m=+168.296633546" watchObservedRunningTime="2026-01-23 10:51:43.672729307 +0000 UTC m=+168.297409166" Jan 23 10:51:43 crc kubenswrapper[4689]: I0123 10:51:43.693922 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-crlgp" podStartSLOduration=2.359648675 podStartE2EDuration="1m11.693901592s" podCreationTimestamp="2026-01-23 10:50:32 +0000 UTC" firstStartedPulling="2026-01-23 10:50:33.906382567 +0000 UTC m=+98.531062426" lastFinishedPulling="2026-01-23 10:51:43.240635484 +0000 UTC m=+167.865315343" observedRunningTime="2026-01-23 10:51:43.691920449 +0000 UTC m=+168.316600308" watchObservedRunningTime="2026-01-23 10:51:43.693901592 +0000 UTC m=+168.318581451" Jan 23 10:51:44 crc kubenswrapper[4689]: I0123 10:51:44.650856 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerStarted","Data":"85651ad73e08c1e5ee8b4fe6e10337396a4adc8fd06f6b2145b10c90173e56d2"} Jan 23 10:51:44 crc kubenswrapper[4689]: I0123 10:51:44.669137 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nzrpf" podStartSLOduration=3.181301721 podStartE2EDuration="1m9.669116575s" podCreationTimestamp="2026-01-23 10:50:35 +0000 UTC" firstStartedPulling="2026-01-23 10:50:37.055840378 +0000 UTC m=+101.680520237" lastFinishedPulling="2026-01-23 10:51:43.543655232 +0000 UTC m=+168.168335091" observedRunningTime="2026-01-23 10:51:44.667171082 +0000 UTC m=+169.291850961" watchObservedRunningTime="2026-01-23 10:51:44.669116575 +0000 UTC m=+169.293796434" Jan 23 10:51:45 crc kubenswrapper[4689]: I0123 10:51:45.158234 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:51:45 crc kubenswrapper[4689]: I0123 10:51:45.158314 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:51:45 crc kubenswrapper[4689]: I0123 10:51:45.384839 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:51:45 crc kubenswrapper[4689]: I0123 10:51:45.385174 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:51:46 crc kubenswrapper[4689]: I0123 10:51:46.217502 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5x86b" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" 
containerName="registry-server" probeResult="failure" output=< Jan 23 10:51:46 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 10:51:46 crc kubenswrapper[4689]: > Jan 23 10:51:46 crc kubenswrapper[4689]: I0123 10:51:46.425608 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nzrpf" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="registry-server" probeResult="failure" output=< Jan 23 10:51:46 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 10:51:46 crc kubenswrapper[4689]: > Jan 23 10:51:51 crc kubenswrapper[4689]: I0123 10:51:51.772255 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:51:51 crc kubenswrapper[4689]: I0123 10:51:51.773320 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:51:51 crc kubenswrapper[4689]: I0123 10:51:51.811780 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:51:52 crc kubenswrapper[4689]: I0123 10:51:52.437010 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:52 crc kubenswrapper[4689]: I0123 10:51:52.437063 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:52 crc kubenswrapper[4689]: I0123 10:51:52.477761 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:52 crc kubenswrapper[4689]: I0123 10:51:52.725060 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:52 crc kubenswrapper[4689]: I0123 10:51:52.728424 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:51:55 crc kubenswrapper[4689]: I0123 10:51:55.224638 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:51:55 crc kubenswrapper[4689]: I0123 10:51:55.299472 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:51:55 crc kubenswrapper[4689]: I0123 10:51:55.439765 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:51:55 crc kubenswrapper[4689]: I0123 10:51:55.489468 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:51:56 crc kubenswrapper[4689]: I0123 10:51:56.061987 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-crlgp"] Jan 23 10:51:56 crc kubenswrapper[4689]: I0123 10:51:56.062423 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-crlgp" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="registry-server" containerID="cri-o://417850f4688835e0859140bf9217128c8aac8e5833f730b64e96371ab038128d" gracePeriod=2 Jan 23 10:51:57 crc kubenswrapper[4689]: I0123 10:51:57.721060 4689 generic.go:334] "Generic (PLEG): container 
finished" podID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerID="417850f4688835e0859140bf9217128c8aac8e5833f730b64e96371ab038128d" exitCode=0 Jan 23 10:51:57 crc kubenswrapper[4689]: I0123 10:51:57.721139 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerDied","Data":"417850f4688835e0859140bf9217128c8aac8e5833f730b64e96371ab038128d"} Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.258026 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"] Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.258388 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nzrpf" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="registry-server" containerID="cri-o://85651ad73e08c1e5ee8b4fe6e10337396a4adc8fd06f6b2145b10c90173e56d2" gracePeriod=2 Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.601820 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.625742 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzxd5\" (UniqueName: \"kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5\") pod \"d580e02f-6e70-4e5b-b78a-025d90866c97\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.625829 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities\") pod \"d580e02f-6e70-4e5b-b78a-025d90866c97\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.625898 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content\") pod \"d580e02f-6e70-4e5b-b78a-025d90866c97\" (UID: \"d580e02f-6e70-4e5b-b78a-025d90866c97\") " Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.626917 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities" (OuterVolumeSpecName: "utilities") pod "d580e02f-6e70-4e5b-b78a-025d90866c97" (UID: "d580e02f-6e70-4e5b-b78a-025d90866c97"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.627305 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.631829 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5" (OuterVolumeSpecName: "kube-api-access-xzxd5") pod "d580e02f-6e70-4e5b-b78a-025d90866c97" (UID: "d580e02f-6e70-4e5b-b78a-025d90866c97"). InnerVolumeSpecName "kube-api-access-xzxd5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.693762 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d580e02f-6e70-4e5b-b78a-025d90866c97" (UID: "d580e02f-6e70-4e5b-b78a-025d90866c97"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.727752 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzxd5\" (UniqueName: \"kubernetes.io/projected/d580e02f-6e70-4e5b-b78a-025d90866c97-kube-api-access-xzxd5\") on node \"crc\" DevicePath \"\"" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.727784 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d580e02f-6e70-4e5b-b78a-025d90866c97-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.731645 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-crlgp" event={"ID":"d580e02f-6e70-4e5b-b78a-025d90866c97","Type":"ContainerDied","Data":"5b5c75aaba6657d2be939f1835d73eb11f37f25fe4b8470d0d3907a179a32a42"} Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.731703 4689 scope.go:117] "RemoveContainer" containerID="417850f4688835e0859140bf9217128c8aac8e5833f730b64e96371ab038128d" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.731747 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-crlgp" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.752524 4689 scope.go:117] "RemoveContainer" containerID="aef6eb1372f25bb71c2ee5c5d0de7431eaf22aa07fb21e65cb9a492ca4371d9e" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.779912 4689 scope.go:117] "RemoveContainer" containerID="ccb2773c4d019338930b65048e47b85e19e66f6cfe01080df594624fe6c00171" Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.779960 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-crlgp"] Jan 23 10:51:58 crc kubenswrapper[4689]: I0123 10:51:58.784192 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-crlgp"] Jan 23 10:51:59 crc kubenswrapper[4689]: I0123 10:51:59.653675 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" path="/var/lib/kubelet/pods/d580e02f-6e70-4e5b-b78a-025d90866c97/volumes" Jan 23 10:51:59 crc kubenswrapper[4689]: I0123 10:51:59.745239 4689 generic.go:334] "Generic (PLEG): container finished" podID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerID="85651ad73e08c1e5ee8b4fe6e10337396a4adc8fd06f6b2145b10c90173e56d2" exitCode=0 Jan 23 10:51:59 crc kubenswrapper[4689]: I0123 10:51:59.745305 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerDied","Data":"85651ad73e08c1e5ee8b4fe6e10337396a4adc8fd06f6b2145b10c90173e56d2"} Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.534782 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.550259 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxg4j\" (UniqueName: \"kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j\") pod \"90aea647-917c-4e24-ada7-0b2ee683bd4f\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.550360 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content\") pod \"90aea647-917c-4e24-ada7-0b2ee683bd4f\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.559608 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j" (OuterVolumeSpecName: "kube-api-access-sxg4j") pod "90aea647-917c-4e24-ada7-0b2ee683bd4f" (UID: "90aea647-917c-4e24-ada7-0b2ee683bd4f"). InnerVolumeSpecName "kube-api-access-sxg4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.651540 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities\") pod \"90aea647-917c-4e24-ada7-0b2ee683bd4f\" (UID: \"90aea647-917c-4e24-ada7-0b2ee683bd4f\") " Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.652103 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxg4j\" (UniqueName: \"kubernetes.io/projected/90aea647-917c-4e24-ada7-0b2ee683bd4f-kube-api-access-sxg4j\") on node \"crc\" DevicePath \"\"" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.654074 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities" (OuterVolumeSpecName: "utilities") pod "90aea647-917c-4e24-ada7-0b2ee683bd4f" (UID: "90aea647-917c-4e24-ada7-0b2ee683bd4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.698664 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90aea647-917c-4e24-ada7-0b2ee683bd4f" (UID: "90aea647-917c-4e24-ada7-0b2ee683bd4f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.753064 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.753092 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90aea647-917c-4e24-ada7-0b2ee683bd4f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.755847 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzrpf" event={"ID":"90aea647-917c-4e24-ada7-0b2ee683bd4f","Type":"ContainerDied","Data":"1b58312a5c246540dd03a5c687199402391e3522c0dfe4ffdad7a7e1a782569f"} Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.755935 4689 scope.go:117] "RemoveContainer" containerID="85651ad73e08c1e5ee8b4fe6e10337396a4adc8fd06f6b2145b10c90173e56d2" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.755988 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzrpf" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.779834 4689 scope.go:117] "RemoveContainer" containerID="ba95d4ffbbedc8db82e0a2a1cd4e513a75e6ed25ee9f8bee2695fadb2370ab45" Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.788913 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"] Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.792787 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nzrpf"] Jan 23 10:52:00 crc kubenswrapper[4689]: I0123 10:52:00.815831 4689 scope.go:117] "RemoveContainer" containerID="e9164db7bd38ed676b6ee3be7b533f69350e34fd7d2579bbc3876aedb4db1524" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.650068 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" path="/var/lib/kubelet/pods/90aea647-917c-4e24-ada7-0b2ee683bd4f/volumes" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898048 4689 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898263 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898274 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898284 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898290 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898299 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898305 4689 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898315 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898321 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898330 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a158bc-5105-4b88-bc04-1c765e6b9206" containerName="pruner" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898336 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a158bc-5105-4b88-bc04-1c765e6b9206" containerName="pruner" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898345 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898366 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898374 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898380 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898386 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898392 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898401 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898407 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="extract-utilities" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898414 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898420 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898427 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898440 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898454 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898461 4689 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="extract-content" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.898471 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898478 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898579 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7315448-2c12-48b1-b49d-872cb9881a73" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898590 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="43400493-28cb-47ff-a065-797a27a93d58" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898599 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="90aea647-917c-4e24-ada7-0b2ee683bd4f" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898609 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d580e02f-6e70-4e5b-b78a-025d90866c97" containerName="registry-server" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898617 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3a158bc-5105-4b88-bc04-1c765e6b9206" containerName="pruner" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.898962 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.899620 4689 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.900129 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751" gracePeriod=15 Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.900200 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30" gracePeriod=15 Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.900234 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14" gracePeriod=15 Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.900328 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35" gracePeriod=15 Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.900350 4689 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10" gracePeriod=15 Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901360 4689 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901569 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901586 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901613 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901625 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901644 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901657 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901673 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901685 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901699 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901711 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901727 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901738 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 10:52:01 crc kubenswrapper[4689]: E0123 10:52:01.901752 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.901765 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902002 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902028 
4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902050 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902064 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902080 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 10:52:01 crc kubenswrapper[4689]: I0123 10:52:01.902483 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.071685 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072086 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072121 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072162 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072199 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072227 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072248 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.072280 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.172921 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.172984 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173143 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173030 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173215 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173249 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173320 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173381 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173428 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173429 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173508 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173546 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173616 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.173623 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.183800 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 
10:52:02.184689 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.185325 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.340554 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.340684 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.772747 4689 generic.go:334] "Generic (PLEG): container finished" podID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" containerID="51a9c7b15371158d6709648f927a1ad1cf2d8e31f7e32613715b92772739dc92" exitCode=0 Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.772816 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7863a55a-f25f-4385-b0d7-9b8f88361e3d","Type":"ContainerDied","Data":"51a9c7b15371158d6709648f927a1ad1cf2d8e31f7e32613715b92772739dc92"} Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.773525 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.773787 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.774055 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.775954 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.777520 4689 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.778462 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751" exitCode=0 Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.778484 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14" exitCode=0 Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.778493 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30" exitCode=0 Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.778500 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35" exitCode=2 Jan 23 10:52:02 crc kubenswrapper[4689]: I0123 10:52:02.778534 4689 scope.go:117] "RemoveContainer" containerID="ecd7c78cd975be74e1d838c82849f615953d88ccaec3e47cb61753afb2dcc734" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.009738 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:03Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:03Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:03Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:03Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:3295ee1e384bd13d7f93a565d0e83b4cb096da43c673235ced6ac2c39d64dfa1\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:91b55f2f378a9a1fbbda6c2423a0a3bc0c66e0dd45dee584db70782d1b7ba863\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1671873254},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:86aa2e9e8c3a1d4fdb701dc4c88eca6a9d0e219a7bd13fb13cb88cb1d0868ba4\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:f24d420ce166977917c7165d0314801df739a06bf165feb72ef8dea197d6fab9\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1203140844},{\\\"names\\\":[\\
\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:2b72e40c5d5b36b681f40c16ebf3dcac6520ed0c79f174ba87f673ab7afd209a\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:d83ee77ad07e06451a84205ac4c85c69e912a1c975e1a8a95095d79218028dce\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1178956511},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:8ec63a5af90efa25f6221a312db015f279dc78f8c7319e0fa1782471e1e18acf\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:99b77813d1f8030ff0e28a82bfc5b89346cbad2ca5cb2f89274e21e035b5b066\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1176015092},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.010502 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.011075 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.011453 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.011850 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.011894 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 10:52:03 crc kubenswrapper[4689]: I0123 10:52:03.310783 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:52:03 crc kubenswrapper[4689]: I0123 10:52:03.310856 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:52:03 crc kubenswrapper[4689]: E0123 10:52:03.311580 4689 event.go:368] "Unable to write event (may retry after sleeping)" 
err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/events/machine-config-daemon-sp7sf.188d56a489b2c20b\": dial tcp 38.102.83.179:6443: connect: connection refused" event=< Jan 23 10:52:03 crc kubenswrapper[4689]: &Event{ObjectMeta:{machine-config-daemon-sp7sf.188d56a489b2c20b openshift-machine-config-operator 29212 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:machine-config-daemon-sp7sf,UID:3d8de6cc-a03d-468b-bfe9-fbf544087653,APIVersion:v1,ResourceVersion:26541,FieldPath:spec.containers{machine-config-daemon},},Reason:ProbeError,Message:Liveness probe error: Get "http://127.0.0.1:8798/health": dial tcp 127.0.0.1:8798: connect: connection refused Jan 23 10:52:03 crc kubenswrapper[4689]: body: Jan 23 10:52:03 crc kubenswrapper[4689]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 10:51:33 +0000 UTC,LastTimestamp:2026-01-23 10:52:03.31083689 +0000 UTC m=+187.935516789,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 23 10:52:03 crc kubenswrapper[4689]: > Jan 23 10:52:03 crc kubenswrapper[4689]: I0123 10:52:03.787529 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.146648 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.147543 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.147745 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.289391 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.290899 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.291497 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.292091 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.292593 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301122 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir\") pod \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301242 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock\") pod \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301229 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7863a55a-f25f-4385-b0d7-9b8f88361e3d" (UID: "7863a55a-f25f-4385-b0d7-9b8f88361e3d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301301 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access\") pod \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\" (UID: \"7863a55a-f25f-4385-b0d7-9b8f88361e3d\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301354 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock" (OuterVolumeSpecName: "var-lock") pod "7863a55a-f25f-4385-b0d7-9b8f88361e3d" (UID: "7863a55a-f25f-4385-b0d7-9b8f88361e3d"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301565 4689 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.301579 4689 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7863a55a-f25f-4385-b0d7-9b8f88361e3d-var-lock\") on node \"crc\" DevicePath \"\"" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.305908 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7863a55a-f25f-4385-b0d7-9b8f88361e3d" (UID: "7863a55a-f25f-4385-b0d7-9b8f88361e3d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402319 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402461 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402501 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402532 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402625 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.402645 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.403288 4689 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.403327 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7863a55a-f25f-4385-b0d7-9b8f88361e3d-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.403347 4689 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.403364 4689 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.797716 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.800410 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7863a55a-f25f-4385-b0d7-9b8f88361e3d","Type":"ContainerDied","Data":"4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce"}
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.800488 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4aecdf4535e59bb987693323293bb416e4a656e6c82218b7c17e5572d53d6bce"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.805811 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.807324 4689 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10" exitCode=0
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.807413 4689 scope.go:117] "RemoveContainer" containerID="5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.807590 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.824749 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.825692 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.826433 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.836664 4689 scope.go:117] "RemoveContainer" containerID="3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.841072 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.841529 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.841955 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.868072 4689 scope.go:117] "RemoveContainer" containerID="97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.900388 4689 scope.go:117] "RemoveContainer" containerID="9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.921862 4689 scope.go:117] "RemoveContainer" containerID="8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.950991 4689 scope.go:117] "RemoveContainer" containerID="3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.975236 4689 scope.go:117] "RemoveContainer" containerID="5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.976755 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\": container with ID starting with 5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751 not found: ID does not exist" containerID="5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.976800 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751"} err="failed to get container status \"5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\": rpc error: code = NotFound desc = could not find container \"5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751\": container with ID starting with 5860a7be846d283742617f40dfb0bda6724db4de3577133befa70a11580a8751 not found: ID does not exist"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.976833 4689 scope.go:117] "RemoveContainer" containerID="3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.977701 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\": container with ID starting with 3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14 not found: ID does not exist" containerID="3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.977785 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14"} err="failed to get container status \"3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\": rpc error: code = NotFound desc = could not find container \"3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14\": container with ID starting with 3bdaa0baa7482216b214890be0319ca7592fa93290dcc22d4e55d076a5779b14 not found: ID does not exist"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.977810 4689 scope.go:117] "RemoveContainer" containerID="97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.978486 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\": container with ID starting with 97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30 not found: ID does not exist" containerID="97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.978519 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30"} err="failed to get container status \"97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\": rpc error: code = NotFound desc = could not find container \"97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30\": container with ID starting with 97746360cf5c694de7481fb708b18061f4a1146828e6def82ec8c083820f2a30 not found: ID does not exist"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.978539 4689 scope.go:117] "RemoveContainer" containerID="9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.978911 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\": container with ID starting with 9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35 not found: ID does not exist" containerID="9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.978951 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35"} err="failed to get container status \"9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\": rpc error: code = NotFound desc = could not find container \"9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35\": container with ID starting with 9ca5de5be94904a8bdbf1054f8f4e122ce1792fabb069e4833bd132ae824ac35 not found: ID does not exist"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.978970 4689 scope.go:117] "RemoveContainer" containerID="8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.979249 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\": container with ID starting with 8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10 not found: ID does not exist" containerID="8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.979281 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10"} err="failed to get container status \"8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\": rpc error: code = NotFound desc = could not find container \"8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10\": container with ID starting with 8327883aab2061392860faa81b593bc8950e568f847a91daae783020c8f1dd10 not found: ID does not exist"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.979301 4689 scope.go:117] "RemoveContainer" containerID="3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16"
Jan 23 10:52:04 crc kubenswrapper[4689]: E0123 10:52:04.979581 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\": container with ID starting with 3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16 not found: ID does not exist" containerID="3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16"
Jan 23 10:52:04 crc kubenswrapper[4689]: I0123 10:52:04.979606 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16"} err="failed to get container status \"3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\": rpc error: code = NotFound desc = could not find container \"3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16\": container with ID starting with 3196ee394c0d2a140a575afd9ac43d3d8c344d839ebd0dcb74fc311614c61f16 not found: ID does not exist"
Jan 23 10:52:05 crc kubenswrapper[4689]: I0123 10:52:05.643006 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:05 crc kubenswrapper[4689]: I0123 10:52:05.643549 4689 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:05 crc kubenswrapper[4689]: I0123 10:52:05.643870 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:05 crc kubenswrapper[4689]: I0123 10:52:05.647849 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Jan 23 10:52:06 crc kubenswrapper[4689]: E0123 10:52:06.945473 4689 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.179:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 10:52:06 crc kubenswrapper[4689]: I0123 10:52:06.946078 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.665888 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.666476 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.666969 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.667513 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.667816 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: I0123 10:52:07.667856 4689 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.668398 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="200ms"
Jan 23 10:52:07 crc kubenswrapper[4689]: I0123 10:52:07.826644 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dbc9eea6d0e6506babfc6410f3fe15a361763061b66eb3aa2332a95aa6b70333"}
Jan 23 10:52:07 crc kubenswrapper[4689]: I0123 10:52:07.826700 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"20be9a4c6396c12453d98803213ae8942d0d6cde17950e2ccd623025c75d4e67"}
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.827584 4689 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.179:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 10:52:07 crc kubenswrapper[4689]: I0123 10:52:07.828608 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: I0123 10:52:07.829020 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:07 crc kubenswrapper[4689]: E0123 10:52:07.869401 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="400ms"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.031612 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" containerName="oauth-openshift" containerID="cri-o://911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40" gracePeriod=15
Jan 23 10:52:08 crc kubenswrapper[4689]: E0123 10:52:08.270483 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="800ms"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.394975 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.395825 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.396340 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.396644 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556635 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gjms\" (UniqueName: \"kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556773 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556799 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556845 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556871 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556912 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556934 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556952 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.556979 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.557012 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.557037 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.557094 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.557116 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.557200 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data\") pod \"83e87693-d35f-4125-a703-f9c5e9a5652c\" (UID: \"83e87693-d35f-4125-a703-f9c5e9a5652c\") "
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.558906 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.559118 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.560390 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.561030 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.561801 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.570343 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.572379 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms" (OuterVolumeSpecName: "kube-api-access-7gjms") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "kube-api-access-7gjms". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.572539 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.575274 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.575595 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.576301 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.576573 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.576882 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.577314 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "83e87693-d35f-4125-a703-f9c5e9a5652c" (UID: "83e87693-d35f-4125-a703-f9c5e9a5652c"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658325 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658362 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658377 4689 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658391 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658406 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658419 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658432 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658445 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658458 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658470 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658486 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gjms\" (UniqueName: \"kubernetes.io/projected/83e87693-d35f-4125-a703-f9c5e9a5652c-kube-api-access-7gjms\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658696 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658721 4689 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/83e87693-d35f-4125-a703-f9c5e9a5652c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.658735 4689 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/83e87693-d35f-4125-a703-f9c5e9a5652c-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.835660 4689 generic.go:334] "Generic (PLEG): container finished" podID="83e87693-d35f-4125-a703-f9c5e9a5652c" containerID="911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40" exitCode=0
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.835726 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" event={"ID":"83e87693-d35f-4125-a703-f9c5e9a5652c","Type":"ContainerDied","Data":"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"}
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.835806 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" event={"ID":"83e87693-d35f-4125-a703-f9c5e9a5652c","Type":"ContainerDied","Data":"4186dfb4538871753d7ad6e1b7cdf1ba91f00ef304c64e773ae8af4272e67ed5"}
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.835832 4689 scope.go:117] "RemoveContainer" containerID="911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.836347 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.837346 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.837884 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.838810 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.855907 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.856987 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.857622 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.863873 4689 scope.go:117] "RemoveContainer" containerID="911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"
Jan 23 10:52:08 crc kubenswrapper[4689]: E0123 10:52:08.864650 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40\": container with ID starting with 911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40 not found: ID does not exist" containerID="911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"
Jan 23 10:52:08 crc kubenswrapper[4689]: I0123 10:52:08.864772 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40"} err="failed to get container status \"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40\": rpc error: code = NotFound desc = could not find container \"911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40\": container with ID starting with 911d0f0581fa1a974a3e5a183cbe6ad2a2f271d1669196c72583c86f2ee7da40 not found: ID does not exist"
Jan 23 10:52:09 crc kubenswrapper[4689]: E0123 10:52:09.073647 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="1.6s"
Jan 23 10:52:10 crc kubenswrapper[4689]: E0123 10:52:10.675127 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="3.2s"
Jan 23 10:52:12 crc kubenswrapper[4689]: E0123 10:52:12.436627 4689 event.go:368] "Unable to write event (may retry after sleeping)" err="Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/events/machine-config-daemon-sp7sf.188d56a489b2c20b\": dial tcp 38.102.83.179:6443: connect: connection refused" event=<
Jan 23 10:52:12 crc kubenswrapper[4689]: &Event{ObjectMeta:{machine-config-daemon-sp7sf.188d56a489b2c20b openshift-machine-config-operator 29212 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:machine-config-daemon-sp7sf,UID:3d8de6cc-a03d-468b-bfe9-fbf544087653,APIVersion:v1,ResourceVersion:26541,FieldPath:spec.containers{machine-config-daemon},},Reason:ProbeError,Message:Liveness probe error: Get "http://127.0.0.1:8798/health": dial tcp 127.0.0.1:8798: connect: connection refused
Jan 23 10:52:12 crc kubenswrapper[4689]: body:
Jan 23 10:52:12 crc kubenswrapper[4689]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 10:51:33 +0000 UTC,LastTimestamp:2026-01-23 10:52:03.31083689 +0000 UTC m=+187.935516789,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Jan 23 10:52:12 crc kubenswrapper[4689]: >
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.370699 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:13Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:13Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:13Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T10:52:13Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:3295ee1e384bd13d7f93a565d0e83b4cb096da43c673235ced6ac2c39d64dfa1\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:91b55f2f378a9a1fbbda6c2423a0a3bc0c66e0dd45dee584db70782d1b7ba863\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1671873254},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:86aa2e9e8c3a1d4fdb701dc4c88eca6a9d0e219a7bd13fb13cb88cb1d0868ba4\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:f24d420ce166977917c7165d0314801df739a06bf165feb72ef8dea197d6fab9\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1203140844},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:2b72e40c5d5b36b681f40c16ebf3dcac6520ed0c79f174ba87f673ab7afd209a\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:d83ee77ad07e06451a84205ac4c85c69e912a1c975e1a8a95095d79218028dce\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1178956511},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:8ec63a5af90efa25f6221a312db015f279dc78f8c7319e0fa1782471e1e18acf\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:99b77813d1f8030ff0e28a82bfc5b89346cbad2ca5cb2f89274e21e035b5b066\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1176015092},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.371245 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.371779 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.372247 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.372576 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.372605 4689 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 23 10:52:13 crc kubenswrapper[4689]: E0123 10:52:13.876627 4689 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="6.4s"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.639249 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.639989 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.641301 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.641772 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.651419 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.651457 4689 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:14 crc kubenswrapper[4689]: E0123 10:52:14.651982 4689 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.652645 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:14 crc kubenswrapper[4689]: W0123 10:52:14.677213 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-e3cb85bb770c0722e62bdebd2734d2a96e4b9fe924882b2b4846a1f4dae77052 WatchSource:0}: Error finding container e3cb85bb770c0722e62bdebd2734d2a96e4b9fe924882b2b4846a1f4dae77052: Status 404 returned error can't find the container with id e3cb85bb770c0722e62bdebd2734d2a96e4b9fe924882b2b4846a1f4dae77052
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.875779 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.875892 4689 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34" exitCode=1
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.875981 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34"}
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.876563 4689 scope.go:117] "RemoveContainer" containerID="f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.876758 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.877286 4689 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.877303 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e3cb85bb770c0722e62bdebd2734d2a96e4b9fe924882b2b4846a1f4dae77052"}
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.877921 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:14 crc kubenswrapper[4689]: I0123 10:52:14.878637 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.631420 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.647175 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.647516 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.647843 4689 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.648191 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.648487 4689 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.884098 4689 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="56b6b416cdf1932bb9a7da444e27e7c989110680d9659b4855c62b102ee0e51e" exitCode=0
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.884184 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"56b6b416cdf1932bb9a7da444e27e7c989110680d9659b4855c62b102ee0e51e"}
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.884390 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.884406 4689 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:15 crc kubenswrapper[4689]: E0123 10:52:15.884910 4689 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.884919 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.885532 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.885783 4689 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.885992 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.886315 4689 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.887584 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.887668 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c"}
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.888504 4689 status_manager.go:851] "Failed to get status for pod" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.888805 4689 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.889142 4689 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.889570 4689 status_manager.go:851] "Failed to get status for pod" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" pod="openshift-authentication/oauth-openshift-558db77b4-cwhbm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-cwhbm\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:15 crc kubenswrapper[4689]: I0123 10:52:15.890084 4689 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.179:6443: connect: connection refused"
Jan 23 10:52:16 crc kubenswrapper[4689]: I0123 10:52:16.907878 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3e5f01517c7fb7cea8a91bbb43d4268b77ca28f031f51f62ce7a12fc3209ab84"}
Jan 23 10:52:16 crc kubenswrapper[4689]: I0123 10:52:16.908279 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"de7e9044a57fd710499fd1ba8f34d2c84be658c9696e398dd8121d42fa9973c1"}
Jan 23 10:52:16 crc kubenswrapper[4689]: I0123 10:52:16.908295 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ee3ca221fff9313244868517c1a94b06764cc0f65b2ef965056f90b4896032f3"}
Jan 23 10:52:16 crc kubenswrapper[4689]: I0123 10:52:16.908306 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b190d9de48acecca8e31062915777c915bcb179dc335107ab7d679993a4b1958"}
Jan 23 10:52:17 crc kubenswrapper[4689]: I0123 10:52:17.914734 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"18c0df8fa88b12db0975b028260b8846a5631d2be2c2a3225c127f4318fd0080"}
Jan 23 10:52:17 crc kubenswrapper[4689]: I0123 10:52:17.915095 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:17 crc kubenswrapper[4689]: I0123 10:52:17.915131 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:17 crc kubenswrapper[4689]: I0123 10:52:17.915172 4689 mirror_client.go:130] "Deleting a mirror
pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080" Jan 23 10:52:19 crc kubenswrapper[4689]: I0123 10:52:19.653306 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:19 crc kubenswrapper[4689]: I0123 10:52:19.653693 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:19 crc kubenswrapper[4689]: I0123 10:52:19.658917 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:21 crc kubenswrapper[4689]: I0123 10:52:21.638463 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:52:21 crc kubenswrapper[4689]: I0123 10:52:21.639295 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 10:52:21 crc kubenswrapper[4689]: I0123 10:52:21.639434 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 23 10:52:22 crc kubenswrapper[4689]: I0123 10:52:22.927199 4689 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:22 crc kubenswrapper[4689]: I0123 10:52:22.944325 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080" Jan 23 10:52:22 crc kubenswrapper[4689]: I0123 10:52:22.944373 4689 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080" Jan 23 10:52:22 crc kubenswrapper[4689]: I0123 10:52:22.951613 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 10:52:23 crc kubenswrapper[4689]: I0123 10:52:23.049532 4689 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="11601001-13d5-40c2-865d-e11c777e3b2f" Jan 23 10:52:23 crc kubenswrapper[4689]: I0123 10:52:23.949882 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080" Jan 23 10:52:23 crc kubenswrapper[4689]: I0123 10:52:23.950518 4689 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080" Jan 23 10:52:23 crc kubenswrapper[4689]: I0123 10:52:23.953636 4689 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="11601001-13d5-40c2-865d-e11c777e3b2f" Jan 23 10:52:25 crc kubenswrapper[4689]: I0123 10:52:25.630831 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:52:31 crc kubenswrapper[4689]: I0123 10:52:31.639043 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 10:52:31 crc kubenswrapper[4689]: I0123 10:52:31.639726 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 23 10:52:32 crc kubenswrapper[4689]: I0123 10:52:32.817395 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.158628 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.233456 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.276225 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.311585 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.311652 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.311703 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.312301 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.312354 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4" gracePeriod=600 Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.337494 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 23 10:52:33 crc 
kubenswrapper[4689]: I0123 10:52:33.369678 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 23 10:52:33 crc kubenswrapper[4689]: I0123 10:52:33.486393 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.025597 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4" exitCode=0 Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.025669 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4"} Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.026286 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8"} Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.286108 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.366704 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.579688 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.672045 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.728382 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.751459 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.790836 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.857430 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 23 10:52:34 crc kubenswrapper[4689]: I0123 10:52:34.878641 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.055250 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.293120 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.357257 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 23 10:52:35 crc kubenswrapper[4689]: 
I0123 10:52:35.517328 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.661760 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.771452 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.820324 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.933640 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 23 10:52:35 crc kubenswrapper[4689]: I0123 10:52:35.996017 4689 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.135196 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.186004 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.360376 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.461559 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.652251 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.664478 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.706139 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.790196 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.815723 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.873546 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.909615 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 23 10:52:36 crc kubenswrapper[4689]: I0123 10:52:36.978597 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.048280 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: 
I0123 10:52:37.108901 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.247257 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.271070 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.391450 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.565243 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.595683 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.637758 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.651913 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.652736 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.662207 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.672085 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.767300 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.798482 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.843832 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.895700 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.959558 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 23 10:52:37 crc kubenswrapper[4689]: I0123 10:52:37.996543 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.008454 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.076816 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 23 10:52:38 crc 
kubenswrapper[4689]: I0123 10:52:38.180482 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.221539 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.247912 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.269979 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.271863 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.320721 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.366573 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.383522 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.556489 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.637922 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.660684 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.667539 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.677677 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.688614 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.693461 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.698057 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.767736 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.789643 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.840277 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 23 10:52:38 crc 
kubenswrapper[4689]: I0123 10:52:38.854805 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.957675 4689 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.960187 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.964883 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 23 10:52:38 crc kubenswrapper[4689]: I0123 10:52:38.981809 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.105845 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.107300 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.142175 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.218236 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.247237 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.296899 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.351468 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.405483 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.414774 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.420841 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.463763 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.506515 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.575813 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.606773 4689 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.635076 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.671473 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.692562 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.737626 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.843394 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 23 10:52:39 crc kubenswrapper[4689]: I0123 10:52:39.946911 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.007327 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.108858 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.242644 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.349551 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.362814 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.365109 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.403052 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.435133 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.535536 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.549040 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.626220 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.709004 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.717922 4689 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.782835 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.788078 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.825432 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.852699 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.933426 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 23 10:52:40 crc kubenswrapper[4689]: I0123 10:52:40.985248 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.022544 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.224355 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.254263 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.307839 4689 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.377752 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.499963 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.520030 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.598362 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.618995 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.647833 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.654060 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.677231 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 23 10:52:41 crc kubenswrapper[4689]: 
I0123 10:52:41.724112 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.732954 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.734196 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.833194 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.875761 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.882910 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.985186 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 23 10:52:41 crc kubenswrapper[4689]: I0123 10:52:41.999229 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.017960 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.051781 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.088395 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.117741 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.157971 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.237690 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.295484 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.334909 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.349006 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.374459 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.473862 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 23 10:52:42 crc 
kubenswrapper[4689]: I0123 10:52:42.576112 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.584599 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.615619 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.663629 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.675275 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.730197 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.757763 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.938629 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 23 10:52:42 crc kubenswrapper[4689]: I0123 10:52:42.971333 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.026936 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.028091 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.033907 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.083340 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.194189 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.399498 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.462854 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.465095 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.609804 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.823091 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.896988 4689 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.914575 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.927440 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 23 10:52:43 crc kubenswrapper[4689]: I0123 10:52:43.945385 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.008039 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.058138 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.506456 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.567911 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.592904 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.731904 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.815794 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.827997 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.836890 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.886802 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 23 10:52:44 crc kubenswrapper[4689]: I0123 10:52:44.903228 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.020554 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.074705 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.088142 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.124789 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.131354 
4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.309383 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.336763 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.356181 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.570609 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.612970 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.691889 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.735981 4689 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.750140 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.820473 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 23 10:52:45 crc kubenswrapper[4689]: I0123 10:52:45.859797 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.063606 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.157053 4689 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.162433 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwhbm","openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.162523 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"] Jan 23 10:52:46 crc kubenswrapper[4689]: E0123 10:52:46.162752 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" containerName="installer" Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.162776 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" containerName="installer" Jan 23 10:52:46 crc kubenswrapper[4689]: E0123 10:52:46.162796 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" containerName="oauth-openshift" Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.162808 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" containerName="oauth-openshift" Jan 23 10:52:46 crc 
kubenswrapper[4689]: I0123 10:52:46.162948 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" containerName="oauth-openshift"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.162965 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7863a55a-f25f-4385-b0d7-9b8f88361e3d" containerName="installer"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.163052 4689 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.163097 4689 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b506d5b9-724d-425b-bb31-d21fb6b92080"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.163526 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.170240 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.174166 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.174378 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.174646 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.176136 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.176457 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179426 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179437 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179714 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179591 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179583 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.179849 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.180955 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.187488 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.190550 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.197444 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.214330 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.221011 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.220992189 podStartE2EDuration="24.220992189s" podCreationTimestamp="2026-01-23 10:52:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:52:46.220896676 +0000 UTC m=+230.845576615" watchObservedRunningTime="2026-01-23 10:52:46.220992189 +0000 UTC m=+230.845672038"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.266623 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.271259 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-service-ca\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.271620 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-router-certs\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.271707 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.271917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272042 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-error\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272102 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e81a398-514c-4bfa-9038-7ede14a02743-audit-dir\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272197 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-audit-policies\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272254 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272316 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272381 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-serving-cert\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272442 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-session\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272507 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-cliconfig\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272588 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-login\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.272672 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc9p5\" (UniqueName: \"kubernetes.io/projected/5e81a398-514c-4bfa-9038-7ede14a02743-kube-api-access-qc9p5\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.324833 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374308 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-error\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374371 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e81a398-514c-4bfa-9038-7ede14a02743-audit-dir\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374410 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-audit-policies\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374443 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374571 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5e81a398-514c-4bfa-9038-7ede14a02743-audit-dir\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374676 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374745 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-serving-cert\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374782 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-session\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374833 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-cliconfig\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.374872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-login\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.375640 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-audit-policies\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.375821 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc9p5\" (UniqueName: \"kubernetes.io/projected/5e81a398-514c-4bfa-9038-7ede14a02743-kube-api-access-qc9p5\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.375890 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-service-ca\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.375931 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.375964 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-router-certs\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.376006 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.378508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.378670 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-service-ca\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.378852 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-cliconfig\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.383750 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.384040 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.384382 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-error\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.385024 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-router-certs\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.385215 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.385742 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-serving-cert\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.391456 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-user-template-login\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.393616 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5e81a398-514c-4bfa-9038-7ede14a02743-v4-0-config-system-session\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.400076 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc9p5\" (UniqueName: \"kubernetes.io/projected/5e81a398-514c-4bfa-9038-7ede14a02743-kube-api-access-qc9p5\") pod \"oauth-openshift-77df6bdc9c-zhjr5\" (UID: \"5e81a398-514c-4bfa-9038-7ede14a02743\") " pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.488407 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.525644 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.633365 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.749811 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"]
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.756006 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.799582 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.834674 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.864783 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.902194 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.954301 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Jan 23 10:52:46 crc kubenswrapper[4689]: I0123 10:52:46.960725 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.100024 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" event={"ID":"5e81a398-514c-4bfa-9038-7ede14a02743","Type":"ContainerStarted","Data":"9bb53ad27423e374c91fcecac5c50a28095cbd08e2a7b976e8ddea89ea691429"}
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.150570 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.160783 4689 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.176248 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.242943 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.262690 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.659435 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.669531 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83e87693-d35f-4125-a703-f9c5e9a5652c" path="/var/lib/kubelet/pods/83e87693-d35f-4125-a703-f9c5e9a5652c/volumes"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.746646 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.806517 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.950796 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 23 10:52:47 crc kubenswrapper[4689]: I0123 10:52:47.997474 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.038546 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.128713 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.205136 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.371862 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.506879 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.883551 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 23 10:52:48 crc kubenswrapper[4689]: I0123 10:52:48.902578 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.105235 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.116864 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" event={"ID":"5e81a398-514c-4bfa-9038-7ede14a02743","Type":"ContainerStarted","Data":"ffd3ae9e6aa8933d9d68a18735ff9e6b4b4b12a171b4e366fa7a22da630b6545"}
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.117263 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.125650 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.142377 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podStartSLOduration=66.142345985 podStartE2EDuration="1m6.142345985s" podCreationTimestamp="2026-01-23 10:51:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:52:49.139914999 +0000 UTC m=+233.764594848" watchObservedRunningTime="2026-01-23 10:52:49.142345985 +0000 UTC m=+233.767025924"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.480076 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.552684 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.611579 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 23 10:52:49 crc kubenswrapper[4689]: I0123 10:52:49.751008 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 23 10:52:50 crc kubenswrapper[4689]: I0123 10:52:50.218525 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 23 10:52:50 crc kubenswrapper[4689]: I0123 10:52:50.259596 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 23 10:52:50 crc kubenswrapper[4689]: I0123 10:52:50.854170 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 23 10:52:56 crc kubenswrapper[4689]: I0123 10:52:56.634929 4689 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 23 10:52:56 crc kubenswrapper[4689]: I0123 10:52:56.635645 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://dbc9eea6d0e6506babfc6410f3fe15a361763061b66eb3aa2332a95aa6b70333" gracePeriod=5
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.198543 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.198873 4689 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="dbc9eea6d0e6506babfc6410f3fe15a361763061b66eb3aa2332a95aa6b70333" exitCode=137
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.198955 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20be9a4c6396c12453d98803213ae8942d0d6cde17950e2ccd623025c75d4e67"
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.201181 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.201271 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.396537 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.396701 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.396898 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397110 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397265 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397249 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397347 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397391 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.397466 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.398249 4689 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.398341 4689 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.398403 4689 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.398425 4689 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.412005 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 10:53:02 crc kubenswrapper[4689]: I0123 10:53:02.499340 4689 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:03 crc kubenswrapper[4689]: I0123 10:53:03.206352 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 10:53:03 crc kubenswrapper[4689]: I0123 10:53:03.651844 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.307179 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"]
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.307880 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerName="controller-manager" containerID="cri-o://7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270" gracePeriod=30
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.447380 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"]
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.447807 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" podUID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" containerName="route-controller-manager" containerID="cri-o://1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d" gracePeriod=30
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.713913 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd"
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.761683 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870306 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca\") pod \"275e6b8d-6343-4146-8f0d-f9b6125e272a\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870365 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjxd4\" (UniqueName: \"kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4\") pod \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870397 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config\") pod \"275e6b8d-6343-4146-8f0d-f9b6125e272a\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870452 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca\") pod \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870480 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config\") pod \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870502 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles\") pod \"275e6b8d-6343-4146-8f0d-f9b6125e272a\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870529 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7vmv\" (UniqueName: \"kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv\") pod \"275e6b8d-6343-4146-8f0d-f9b6125e272a\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870550 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert\") pod \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\" (UID: \"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.870586 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert\") pod \"275e6b8d-6343-4146-8f0d-f9b6125e272a\" (UID: \"275e6b8d-6343-4146-8f0d-f9b6125e272a\") "
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.871623 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca" (OuterVolumeSpecName: "client-ca") pod "275e6b8d-6343-4146-8f0d-f9b6125e272a" (UID: "275e6b8d-6343-4146-8f0d-f9b6125e272a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.871486 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config" (OuterVolumeSpecName: "config") pod "275e6b8d-6343-4146-8f0d-f9b6125e272a" (UID: "275e6b8d-6343-4146-8f0d-f9b6125e272a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.872003 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca" (OuterVolumeSpecName: "client-ca") pod "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" (UID: "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.872073 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config" (OuterVolumeSpecName: "config") pod "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" (UID: "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.872411 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "275e6b8d-6343-4146-8f0d-f9b6125e272a" (UID: "275e6b8d-6343-4146-8f0d-f9b6125e272a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.876303 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "275e6b8d-6343-4146-8f0d-f9b6125e272a" (UID: "275e6b8d-6343-4146-8f0d-f9b6125e272a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.876374 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv" (OuterVolumeSpecName: "kube-api-access-b7vmv") pod "275e6b8d-6343-4146-8f0d-f9b6125e272a" (UID: "275e6b8d-6343-4146-8f0d-f9b6125e272a"). InnerVolumeSpecName "kube-api-access-b7vmv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.876443 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4" (OuterVolumeSpecName: "kube-api-access-hjxd4") pod "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" (UID: "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4"). InnerVolumeSpecName "kube-api-access-hjxd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.876739 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" (UID: "f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972216 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-client-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972268 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972277 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972288 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7vmv\" (UniqueName: \"kubernetes.io/projected/275e6b8d-6343-4146-8f0d-f9b6125e272a-kube-api-access-b7vmv\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972296 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972306 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/275e6b8d-6343-4146-8f0d-f9b6125e272a-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972313 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-client-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972364 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjxd4\" (UniqueName: \"kubernetes.io/projected/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4-kube-api-access-hjxd4\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:15 crc kubenswrapper[4689]: I0123 10:53:15.972373 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/275e6b8d-6343-4146-8f0d-f9b6125e272a-config\") on node \"crc\" DevicePath \"\""
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027261 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"]
Jan 23 10:53:16 crc kubenswrapper[4689]: E0123 10:53:16.027460 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" containerName="route-controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027472 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" containerName="route-controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: E0123 10:53:16.027481 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerName="controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027486 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerName="controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: E0123 10:53:16.027495 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027500 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027592 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027603 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerName="controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027612 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" containerName="route-controller-manager"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.027952 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.039437 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.114249 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.115004 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.123960 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.175072 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.175111 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.175243 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgkxn\" (UniqueName: \"kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.175414 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.175471 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.276998 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277068 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgkxn\" (UniqueName: \"kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277118 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277167 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pglb\" (UniqueName: \"kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277200 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277225 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277259 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277280 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.277314 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.278908 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.279048 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.279651 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.282334 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.285311 4689 generic.go:334] "Generic (PLEG): container finished" podID="275e6b8d-6343-4146-8f0d-f9b6125e272a" containerID="7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270" exitCode=0
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.285382 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" event={"ID":"275e6b8d-6343-4146-8f0d-f9b6125e272a","Type":"ContainerDied","Data":"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"}
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.285410 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd" event={"ID":"275e6b8d-6343-4146-8f0d-f9b6125e272a","Type":"ContainerDied","Data":"f46bc0cd972cd3192f88295c01ccf6a856783119221d8170e33e7973f1f31898"}
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.285430 4689 scope.go:117] "RemoveContainer" containerID="7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.285457 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cnwdd"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.286782 4689 generic.go:334] "Generic (PLEG): container finished" podID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" containerID="1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d" exitCode=0
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.286812 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" event={"ID":"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4","Type":"ContainerDied","Data":"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"}
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.286831 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4" event={"ID":"f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4","Type":"ContainerDied","Data":"b0776652588bbe0d46f0143f5ccb42280388e657e73d8dae8cf9487895e40e6d"}
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.286862 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.301732 4689 scope.go:117] "RemoveContainer" containerID="7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"
Jan 23 10:53:16 crc kubenswrapper[4689]: E0123 10:53:16.302068 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270\": container with ID starting with 7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270 not found: ID does not exist" containerID="7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.302095 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270"} err="failed to get container status \"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270\": rpc error: code = NotFound desc = could not find container \"7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270\": container with ID starting with 7d36037874be79e622ce26d9fa80ab69b4424e3c1ac012bb038817c29eaa0270 not found: ID does not exist"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.302112 4689 scope.go:117] "RemoveContainer" containerID="1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.307201 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgkxn\" (UniqueName: \"kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn\") pod \"controller-manager-55b446ff67-pmjl9\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.313806 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.316356 4689 scope.go:117] "RemoveContainer" containerID="1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"
Jan 23 10:53:16 crc kubenswrapper[4689]: E0123 10:53:16.316777 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d\": container with ID starting with 1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d not found: ID does not exist" containerID="1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.316827 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d"} err="failed to get container status \"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d\": rpc error: code = NotFound desc = could not find container \"1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d\": container with ID starting with 1edfef6660c986b04ff3f6d06a4db84a4c5b48066f33281addfb68feda28a43d not found: ID does not exist"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.318053 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-94vk4"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.329453 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.333799 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cnwdd"]
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.358600 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.378255 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pglb\" (UniqueName: \"kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.378336 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.378385 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.378417 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.379913 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.380689 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"
Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.382427 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID:
\"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.394188 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pglb\" (UniqueName: \"kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb\") pod \"route-controller-manager-646bddd5f-2sd6r\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.428209 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.527769 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"] Jan 23 10:53:16 crc kubenswrapper[4689]: I0123 10:53:16.921388 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"] Jan 23 10:53:16 crc kubenswrapper[4689]: W0123 10:53:16.930426 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01b03027_0731_4ad0_aa7f_266103bbb237.slice/crio-dee6fc4db50c71fbb34411cc3fc0db5696c8d2b99f2f4d86af21871177043cd2 WatchSource:0}: Error finding container dee6fc4db50c71fbb34411cc3fc0db5696c8d2b99f2f4d86af21871177043cd2: Status 404 returned error can't find the container with id dee6fc4db50c71fbb34411cc3fc0db5696c8d2b99f2f4d86af21871177043cd2 Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.296911 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" event={"ID":"01b03027-0731-4ad0-aa7f-266103bbb237","Type":"ContainerStarted","Data":"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed"} Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.296970 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" event={"ID":"01b03027-0731-4ad0-aa7f-266103bbb237","Type":"ContainerStarted","Data":"dee6fc4db50c71fbb34411cc3fc0db5696c8d2b99f2f4d86af21871177043cd2"} Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.297122 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.300476 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" event={"ID":"9d1d16bb-318a-4761-98f6-d9f3da693b25","Type":"ContainerStarted","Data":"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999"} Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.300501 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" event={"ID":"9d1d16bb-318a-4761-98f6-d9f3da693b25","Type":"ContainerStarted","Data":"cb3ab9d6717c9a965a5b26222d613c5ad4b6b3e3f71c1e7c44a7d436e90b06a1"} Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.300813 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" Jan 23 10:53:17 crc 
kubenswrapper[4689]: I0123 10:53:17.305501 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.314210 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" podStartSLOduration=1.314198886 podStartE2EDuration="1.314198886s" podCreationTimestamp="2026-01-23 10:53:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:53:17.31353704 +0000 UTC m=+261.938216919" watchObservedRunningTime="2026-01-23 10:53:17.314198886 +0000 UTC m=+261.938878745" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.333737 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" podStartSLOduration=1.333705264 podStartE2EDuration="1.333705264s" podCreationTimestamp="2026-01-23 10:53:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:53:17.32753589 +0000 UTC m=+261.952215749" watchObservedRunningTime="2026-01-23 10:53:17.333705264 +0000 UTC m=+261.958385133" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.511481 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.646895 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="275e6b8d-6343-4146-8f0d-f9b6125e272a" path="/var/lib/kubelet/pods/275e6b8d-6343-4146-8f0d-f9b6125e272a/volumes" Jan 23 10:53:17 crc kubenswrapper[4689]: I0123 10:53:17.647485 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4" path="/var/lib/kubelet/pods/f14a7cf3-46d6-4e97-9e1d-c7e1f08903a4/volumes" Jan 23 10:53:23 crc kubenswrapper[4689]: I0123 10:53:23.750605 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"] Jan 23 10:53:23 crc kubenswrapper[4689]: I0123 10:53:23.751741 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" podUID="9d1d16bb-318a-4761-98f6-d9f3da693b25" containerName="controller-manager" containerID="cri-o://cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999" gracePeriod=30 Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.306532 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.338093 4689 generic.go:334] "Generic (PLEG): container finished" podID="9d1d16bb-318a-4761-98f6-d9f3da693b25" containerID="cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999" exitCode=0 Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.338174 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" event={"ID":"9d1d16bb-318a-4761-98f6-d9f3da693b25","Type":"ContainerDied","Data":"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999"} Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.338211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" event={"ID":"9d1d16bb-318a-4761-98f6-d9f3da693b25","Type":"ContainerDied","Data":"cb3ab9d6717c9a965a5b26222d613c5ad4b6b3e3f71c1e7c44a7d436e90b06a1"} Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.338236 4689 scope.go:117] "RemoveContainer" containerID="cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.338389 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55b446ff67-pmjl9" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.364569 4689 scope.go:117] "RemoveContainer" containerID="cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999" Jan 23 10:53:24 crc kubenswrapper[4689]: E0123 10:53:24.366530 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999\": container with ID starting with cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999 not found: ID does not exist" containerID="cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.366577 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999"} err="failed to get container status \"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999\": rpc error: code = NotFound desc = could not find container \"cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999\": container with ID starting with cdd93e11d8665c314ec0c0d4a6d3604cd6308ca8936e7570a8a0d63272e15999 not found: ID does not exist" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.476948 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert\") pod \"9d1d16bb-318a-4761-98f6-d9f3da693b25\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.477023 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgkxn\" (UniqueName: \"kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn\") pod \"9d1d16bb-318a-4761-98f6-d9f3da693b25\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.477095 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles\") pod \"9d1d16bb-318a-4761-98f6-d9f3da693b25\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.477117 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca\") pod \"9d1d16bb-318a-4761-98f6-d9f3da693b25\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.477136 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config\") pod \"9d1d16bb-318a-4761-98f6-d9f3da693b25\" (UID: \"9d1d16bb-318a-4761-98f6-d9f3da693b25\") " Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.478002 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca" (OuterVolumeSpecName: "client-ca") pod "9d1d16bb-318a-4761-98f6-d9f3da693b25" (UID: "9d1d16bb-318a-4761-98f6-d9f3da693b25"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.478084 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config" (OuterVolumeSpecName: "config") pod "9d1d16bb-318a-4761-98f6-d9f3da693b25" (UID: "9d1d16bb-318a-4761-98f6-d9f3da693b25"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.478117 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9d1d16bb-318a-4761-98f6-d9f3da693b25" (UID: "9d1d16bb-318a-4761-98f6-d9f3da693b25"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.482123 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d1d16bb-318a-4761-98f6-d9f3da693b25" (UID: "9d1d16bb-318a-4761-98f6-d9f3da693b25"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.485448 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn" (OuterVolumeSpecName: "kube-api-access-cgkxn") pod "9d1d16bb-318a-4761-98f6-d9f3da693b25" (UID: "9d1d16bb-318a-4761-98f6-d9f3da693b25"). InnerVolumeSpecName "kube-api-access-cgkxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.578486 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d1d16bb-318a-4761-98f6-d9f3da693b25-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.578521 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgkxn\" (UniqueName: \"kubernetes.io/projected/9d1d16bb-318a-4761-98f6-d9f3da693b25-kube-api-access-cgkxn\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.578532 4689 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.578541 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.578550 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d1d16bb-318a-4761-98f6-d9f3da693b25-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.668055 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"] Jan 23 10:53:24 crc kubenswrapper[4689]: I0123 10:53:24.677883 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-55b446ff67-pmjl9"] Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.480008 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6589dc88cb-62qls"] Jan 23 10:53:25 crc kubenswrapper[4689]: E0123 10:53:25.480257 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d1d16bb-318a-4761-98f6-d9f3da693b25" containerName="controller-manager" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.480269 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d1d16bb-318a-4761-98f6-d9f3da693b25" containerName="controller-manager" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.480353 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d1d16bb-318a-4761-98f6-d9f3da693b25" containerName="controller-manager" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.480691 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.482697 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.482943 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.483229 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.484053 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.485203 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.487127 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.500316 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.512443 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6589dc88cb-62qls"] Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.591310 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-client-ca\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.591363 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-config\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.591424 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9c27\" (UniqueName: \"kubernetes.io/projected/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-kube-api-access-j9c27\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.591453 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-serving-cert\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.591526 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-proxy-ca-bundles\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.654856 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d1d16bb-318a-4761-98f6-d9f3da693b25" path="/var/lib/kubelet/pods/9d1d16bb-318a-4761-98f6-d9f3da693b25/volumes" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.693277 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-proxy-ca-bundles\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.693364 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-client-ca\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.693401 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-config\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.693459 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9c27\" (UniqueName: \"kubernetes.io/projected/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-kube-api-access-j9c27\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.693496 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-serving-cert\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.694868 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-proxy-ca-bundles\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.695587 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-client-ca\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.696888 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-config\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.700892 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-serving-cert\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.726846 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9c27\" (UniqueName: \"kubernetes.io/projected/9c5d05df-7a1c-4c0c-b791-cc8e932d2560-kube-api-access-j9c27\") pod \"controller-manager-6589dc88cb-62qls\" (UID: \"9c5d05df-7a1c-4c0c-b791-cc8e932d2560\") " pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:25 crc kubenswrapper[4689]: I0123 10:53:25.798061 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:26 crc kubenswrapper[4689]: I0123 10:53:26.305620 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6589dc88cb-62qls"] Jan 23 10:53:26 crc kubenswrapper[4689]: I0123 10:53:26.351586 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" event={"ID":"9c5d05df-7a1c-4c0c-b791-cc8e932d2560","Type":"ContainerStarted","Data":"672257c46d88ede0ef7830ea0cbbf5ecc299d63248d0eee4bf92f303fdf76f15"} Jan 23 10:53:28 crc kubenswrapper[4689]: I0123 10:53:28.363610 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" event={"ID":"9c5d05df-7a1c-4c0c-b791-cc8e932d2560","Type":"ContainerStarted","Data":"454a4c74dbb6a1652d2c0e501a9c5aa9bf1591e3de67f7eb8f95d1bf4f84cab4"} Jan 23 10:53:28 crc kubenswrapper[4689]: I0123 10:53:28.363971 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:28 crc kubenswrapper[4689]: I0123 10:53:28.371037 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 10:53:28 crc kubenswrapper[4689]: I0123 10:53:28.404070 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podStartSLOduration=5.404054755 podStartE2EDuration="5.404054755s" podCreationTimestamp="2026-01-23 10:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:53:28.383624473 +0000 UTC m=+273.008304322" watchObservedRunningTime="2026-01-23 10:53:28.404054755 +0000 UTC m=+273.028734614" Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.315746 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"] Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.316521 4689 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" podUID="01b03027-0731-4ad0-aa7f-266103bbb237" containerName="route-controller-manager" containerID="cri-o://b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed" gracePeriod=30 Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.736599 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.921424 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert\") pod \"01b03027-0731-4ad0-aa7f-266103bbb237\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.921502 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca\") pod \"01b03027-0731-4ad0-aa7f-266103bbb237\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.921566 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pglb\" (UniqueName: \"kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb\") pod \"01b03027-0731-4ad0-aa7f-266103bbb237\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.921626 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config\") pod \"01b03027-0731-4ad0-aa7f-266103bbb237\" (UID: \"01b03027-0731-4ad0-aa7f-266103bbb237\") " Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.922467 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config" (OuterVolumeSpecName: "config") pod "01b03027-0731-4ad0-aa7f-266103bbb237" (UID: "01b03027-0731-4ad0-aa7f-266103bbb237"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.922538 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca" (OuterVolumeSpecName: "client-ca") pod "01b03027-0731-4ad0-aa7f-266103bbb237" (UID: "01b03027-0731-4ad0-aa7f-266103bbb237"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.933750 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01b03027-0731-4ad0-aa7f-266103bbb237" (UID: "01b03027-0731-4ad0-aa7f-266103bbb237"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:53:35 crc kubenswrapper[4689]: I0123 10:53:35.938635 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb" (OuterVolumeSpecName: "kube-api-access-4pglb") pod "01b03027-0731-4ad0-aa7f-266103bbb237" (UID: "01b03027-0731-4ad0-aa7f-266103bbb237"). 
InnerVolumeSpecName "kube-api-access-4pglb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.023419 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.023454 4689 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01b03027-0731-4ad0-aa7f-266103bbb237-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.023466 4689 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01b03027-0731-4ad0-aa7f-266103bbb237-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.023477 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pglb\" (UniqueName: \"kubernetes.io/projected/01b03027-0731-4ad0-aa7f-266103bbb237-kube-api-access-4pglb\") on node \"crc\" DevicePath \"\"" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.402654 4689 generic.go:334] "Generic (PLEG): container finished" podID="01b03027-0731-4ad0-aa7f-266103bbb237" containerID="b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed" exitCode=0 Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.402705 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" event={"ID":"01b03027-0731-4ad0-aa7f-266103bbb237","Type":"ContainerDied","Data":"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed"} Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.402713 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.402741 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r" event={"ID":"01b03027-0731-4ad0-aa7f-266103bbb237","Type":"ContainerDied","Data":"dee6fc4db50c71fbb34411cc3fc0db5696c8d2b99f2f4d86af21871177043cd2"} Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.402769 4689 scope.go:117] "RemoveContainer" containerID="b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.428547 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"] Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.429432 4689 scope.go:117] "RemoveContainer" containerID="b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed" Jan 23 10:53:36 crc kubenswrapper[4689]: E0123 10:53:36.430109 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed\": container with ID starting with b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed not found: ID does not exist" containerID="b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.430259 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed"} err="failed to get container status \"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed\": rpc error: code = NotFound desc = could not find container \"b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed\": container with ID starting with b3ca26232119f4abd1a65b1c2331a6b18dd5ea10f3636e43ca77f73ee30708ed not found: ID does not exist" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.434562 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-646bddd5f-2sd6r"] Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.488370 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76"] Jan 23 10:53:36 crc kubenswrapper[4689]: E0123 10:53:36.488657 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b03027-0731-4ad0-aa7f-266103bbb237" containerName="route-controller-manager" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.488674 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b03027-0731-4ad0-aa7f-266103bbb237" containerName="route-controller-manager" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.488822 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b03027-0731-4ad0-aa7f-266103bbb237" containerName="route-controller-manager" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.489335 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.491427 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.495477 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.495818 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.495957 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.496003 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.496133 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.500911 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76"] Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.630897 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-client-ca\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.631247 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-config\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.631268 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e69fb667-9cde-4376-b12f-2847b0142176-serving-cert\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.631298 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72df\" (UniqueName: \"kubernetes.io/projected/e69fb667-9cde-4376-b12f-2847b0142176-kube-api-access-h72df\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.732347 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-config\") pod 
\"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.732426 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e69fb667-9cde-4376-b12f-2847b0142176-serving-cert\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.732487 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72df\" (UniqueName: \"kubernetes.io/projected/e69fb667-9cde-4376-b12f-2847b0142176-kube-api-access-h72df\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.732575 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-client-ca\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.734132 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-client-ca\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.734539 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e69fb667-9cde-4376-b12f-2847b0142176-config\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.737518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e69fb667-9cde-4376-b12f-2847b0142176-serving-cert\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.771618 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72df\" (UniqueName: \"kubernetes.io/projected/e69fb667-9cde-4376-b12f-2847b0142176-kube-api-access-h72df\") pod \"route-controller-manager-7cc8986677-69l76\" (UID: \"e69fb667-9cde-4376-b12f-2847b0142176\") " pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:36 crc kubenswrapper[4689]: I0123 10:53:36.808758 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.280661 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76"] Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.409450 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" event={"ID":"e69fb667-9cde-4376-b12f-2847b0142176","Type":"ContainerStarted","Data":"1310f2493d20a187538f0e0e33f0c733c42bb03aef4a7942b922c34994a6316c"} Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.409906 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" event={"ID":"e69fb667-9cde-4376-b12f-2847b0142176","Type":"ContainerStarted","Data":"75e93c21b76a3f71c7e29b788949e2611db13fbfda1e1002f2ebfb3a561c15c5"} Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.409931 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.413738 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.413792 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.425785 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podStartSLOduration=2.425764599 podStartE2EDuration="2.425764599s" podCreationTimestamp="2026-01-23 10:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:53:37.424843336 +0000 UTC m=+282.049523195" watchObservedRunningTime="2026-01-23 10:53:37.425764599 +0000 UTC m=+282.050444458" Jan 23 10:53:37 crc kubenswrapper[4689]: I0123 10:53:37.651533 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b03027-0731-4ad0-aa7f-266103bbb237" path="/var/lib/kubelet/pods/01b03027-0731-4ad0-aa7f-266103bbb237/volumes" Jan 23 10:53:38 crc kubenswrapper[4689]: I0123 10:53:38.422558 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 10:53:55 crc kubenswrapper[4689]: I0123 10:53:55.522877 4689 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 23 10:53:55 crc kubenswrapper[4689]: I0123 10:53:55.864963 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pjr88"] Jan 23 10:53:55 crc kubenswrapper[4689]: I0123 10:53:55.865656 4689 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:55 crc kubenswrapper[4689]: I0123 10:53:55.875892 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pjr88"] Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018323 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64f769e0-be75-4b1f-8cbb-587842d51589-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018589 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-registry-certificates\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018618 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwllk\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-kube-api-access-jwllk\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018647 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018674 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-registry-tls\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018691 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64f769e0-be75-4b1f-8cbb-587842d51589-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018727 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-trusted-ca\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.018757 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" 
(UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-bound-sa-token\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.054330 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.120079 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-registry-tls\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.120446 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64f769e0-be75-4b1f-8cbb-587842d51589-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.120697 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-trusted-ca\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121113 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-bound-sa-token\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121007 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64f769e0-be75-4b1f-8cbb-587842d51589-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121350 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64f769e0-be75-4b1f-8cbb-587842d51589-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121552 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-registry-certificates\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121633 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwllk\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-kube-api-access-jwllk\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.121846 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-trusted-ca\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.122416 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64f769e0-be75-4b1f-8cbb-587842d51589-registry-certificates\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.127062 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-registry-tls\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.128332 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64f769e0-be75-4b1f-8cbb-587842d51589-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.140733 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-bound-sa-token\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.141250 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwllk\" (UniqueName: \"kubernetes.io/projected/64f769e0-be75-4b1f-8cbb-587842d51589-kube-api-access-jwllk\") pod \"image-registry-66df7c8f76-pjr88\" (UID: \"64f769e0-be75-4b1f-8cbb-587842d51589\") " pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.179826 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:56 crc kubenswrapper[4689]: I0123 10:53:56.606908 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pjr88"] Jan 23 10:53:56 crc kubenswrapper[4689]: W0123 10:53:56.619260 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64f769e0_be75_4b1f_8cbb_587842d51589.slice/crio-c4ef01e82303a34f3bb78eda7de5fcfcdeb929224a14bea99965f35c0b8871b8 WatchSource:0}: Error finding container c4ef01e82303a34f3bb78eda7de5fcfcdeb929224a14bea99965f35c0b8871b8: Status 404 returned error can't find the container with id c4ef01e82303a34f3bb78eda7de5fcfcdeb929224a14bea99965f35c0b8871b8 Jan 23 10:53:57 crc kubenswrapper[4689]: I0123 10:53:57.545668 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" event={"ID":"64f769e0-be75-4b1f-8cbb-587842d51589","Type":"ContainerStarted","Data":"124fdd031e1adbb915bd981f1d70c8b6fbe7b04b210766c7de8d99eaa4f11897"} Jan 23 10:53:57 crc kubenswrapper[4689]: I0123 10:53:57.545754 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" event={"ID":"64f769e0-be75-4b1f-8cbb-587842d51589","Type":"ContainerStarted","Data":"c4ef01e82303a34f3bb78eda7de5fcfcdeb929224a14bea99965f35c0b8871b8"} Jan 23 10:53:57 crc kubenswrapper[4689]: I0123 10:53:57.545875 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:53:57 crc kubenswrapper[4689]: I0123 10:53:57.573626 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podStartSLOduration=2.5736087899999998 podStartE2EDuration="2.57360879s" podCreationTimestamp="2026-01-23 10:53:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:53:57.565239121 +0000 UTC m=+302.189918990" watchObservedRunningTime="2026-01-23 10:53:57.57360879 +0000 UTC m=+302.198288649" Jan 23 10:54:16 crc kubenswrapper[4689]: I0123 10:54:16.189432 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 10:54:16 crc kubenswrapper[4689]: I0123 10:54:16.277035 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"] Jan 23 10:54:33 crc kubenswrapper[4689]: I0123 10:54:33.921681 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:54:33 crc kubenswrapper[4689]: I0123 10:54:33.922454 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.769084 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-94lj8"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.770001 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-94lj8" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="registry-server" containerID="cri-o://ba2e53c279f7b1262c3fc70bc1828219dd9161c285d59a0040469c0e4adfa5e8" gracePeriod=30 Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.781601 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdzr6"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.781984 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vdzr6" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="registry-server" containerID="cri-o://30754005cc74e1172714ebdcace87dcffc582c8cb52cba88ac5e5106982b30b8" gracePeriod=30 Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.805572 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.828652 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.828925 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mfvh2" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="registry-server" containerID="cri-o://b19ae6926d94f13787da82c56ac4bea84c7498228fde59e41ac700647ada6f7e" gracePeriod=30 Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.834123 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.834509 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5x86b" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="registry-server" containerID="cri-o://37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e" gracePeriod=30 Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.839976 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s7k65"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.840609 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.855143 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s7k65"] Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.945680 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerName="marketplace-operator" containerID="cri-o://3802b9f725815de07e34f94030bad4c4680e3406a31e2434d8d604d5ab9e6d08" gracePeriod=30 Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.949653 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.949727 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:35 crc kubenswrapper[4689]: I0123 10:54:35.949749 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wjhp\" (UniqueName: \"kubernetes.io/projected/386d7669-fab2-42b9-ac43-767d9ae837b8-kube-api-access-8wjhp\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.051048 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.051126 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.051161 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wjhp\" (UniqueName: \"kubernetes.io/projected/386d7669-fab2-42b9-ac43-767d9ae837b8-kube-api-access-8wjhp\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.052171 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.062679 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/386d7669-fab2-42b9-ac43-767d9ae837b8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.074650 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wjhp\" (UniqueName: \"kubernetes.io/projected/386d7669-fab2-42b9-ac43-767d9ae837b8-kube-api-access-8wjhp\") pod \"marketplace-operator-79b997595-s7k65\" (UID: \"386d7669-fab2-42b9-ac43-767d9ae837b8\") " pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.154936 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.610342 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s7k65"] Jan 23 10:54:36 crc kubenswrapper[4689]: W0123 10:54:36.627346 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod386d7669_fab2_42b9_ac43_767d9ae837b8.slice/crio-9d09be6963604fa98dccc5027e397c7fb5841362710aef40da39c0b534a65786 WatchSource:0}: Error finding container 9d09be6963604fa98dccc5027e397c7fb5841362710aef40da39c0b534a65786: Status 404 returned error can't find the container with id 9d09be6963604fa98dccc5027e397c7fb5841362710aef40da39c0b534a65786 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.858131 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.955375 4689 generic.go:334] "Generic (PLEG): container finished" podID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerID="37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e" exitCode=0 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.955458 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerDied","Data":"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.955509 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5x86b" event={"ID":"3f3d6207-0bc4-441d-bf97-406ec30d09a1","Type":"ContainerDied","Data":"250e247e2d6e0947447cf6c3795ffd1464ee624034d9a8a2ceaad0bf2a9d17bc"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.955530 4689 scope.go:117] "RemoveContainer" containerID="37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.955688 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5x86b" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.961138 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities\") pod \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.961183 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qk4t\" (UniqueName: \"kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t\") pod \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.961286 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content\") pod \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\" (UID: \"3f3d6207-0bc4-441d-bf97-406ec30d09a1\") " Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.965211 4689 generic.go:334] "Generic (PLEG): container finished" podID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerID="30754005cc74e1172714ebdcace87dcffc582c8cb52cba88ac5e5106982b30b8" exitCode=0 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.965333 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerDied","Data":"30754005cc74e1172714ebdcace87dcffc582c8cb52cba88ac5e5106982b30b8"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.965863 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities" (OuterVolumeSpecName: "utilities") pod "3f3d6207-0bc4-441d-bf97-406ec30d09a1" (UID: "3f3d6207-0bc4-441d-bf97-406ec30d09a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.967725 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t" (OuterVolumeSpecName: "kube-api-access-7qk4t") pod "3f3d6207-0bc4-441d-bf97-406ec30d09a1" (UID: "3f3d6207-0bc4-441d-bf97-406ec30d09a1"). InnerVolumeSpecName "kube-api-access-7qk4t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.968741 4689 generic.go:334] "Generic (PLEG): container finished" podID="00567441-00cd-4594-a8bd-93db333e1a18" containerID="b19ae6926d94f13787da82c56ac4bea84c7498228fde59e41ac700647ada6f7e" exitCode=0 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.968817 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerDied","Data":"b19ae6926d94f13787da82c56ac4bea84c7498228fde59e41ac700647ada6f7e"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.970035 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" event={"ID":"386d7669-fab2-42b9-ac43-767d9ae837b8","Type":"ContainerStarted","Data":"c22c8a263c9a66fdb382ed6ea910e0e89ffcc82cab368c5ffb8082e5a722bd9c"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.970058 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" event={"ID":"386d7669-fab2-42b9-ac43-767d9ae837b8","Type":"ContainerStarted","Data":"9d09be6963604fa98dccc5027e397c7fb5841362710aef40da39c0b534a65786"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.971270 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.975619 4689 generic.go:334] "Generic (PLEG): container finished" podID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerID="ba2e53c279f7b1262c3fc70bc1828219dd9161c285d59a0040469c0e4adfa5e8" exitCode=0 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.975676 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerDied","Data":"ba2e53c279f7b1262c3fc70bc1828219dd9161c285d59a0040469c0e4adfa5e8"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.975895 4689 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-s7k65 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" start-of-body= Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.975941 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" podUID="386d7669-fab2-42b9-ac43-767d9ae837b8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.976627 4689 generic.go:334] "Generic (PLEG): container finished" podID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerID="3802b9f725815de07e34f94030bad4c4680e3406a31e2434d8d604d5ab9e6d08" exitCode=0 Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.976655 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" event={"ID":"585f99c4-5f5e-4caf-9b9f-31a7f666bea8","Type":"ContainerDied","Data":"3802b9f725815de07e34f94030bad4c4680e3406a31e2434d8d604d5ab9e6d08"} Jan 23 10:54:36 crc kubenswrapper[4689]: I0123 10:54:36.997050 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" podStartSLOduration=1.997032631 podStartE2EDuration="1.997032631s" podCreationTimestamp="2026-01-23 10:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:54:36.992102352 +0000 UTC m=+341.616782201" watchObservedRunningTime="2026-01-23 10:54:36.997032631 +0000 UTC m=+341.621712490" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.001962 4689 scope.go:117] "RemoveContainer" containerID="312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.030103 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdzr6" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.038918 4689 scope.go:117] "RemoveContainer" containerID="d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.039137 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94lj8" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.042483 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mfvh2" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.063058 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.063094 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qk4t\" (UniqueName: \"kubernetes.io/projected/3f3d6207-0bc4-441d-bf97-406ec30d09a1-kube-api-access-7qk4t\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.067752 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.084439 4689 scope.go:117] "RemoveContainer" containerID="37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.084857 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3f3d6207-0bc4-441d-bf97-406ec30d09a1" (UID: "3f3d6207-0bc4-441d-bf97-406ec30d09a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: E0123 10:54:37.084999 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e\": container with ID starting with 37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e not found: ID does not exist" containerID="37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.085044 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e"} err="failed to get container status \"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e\": rpc error: code = NotFound desc = could not find container \"37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e\": container with ID starting with 37b6228765c9087c9def303cd55bfb4527005d739109eb07ce130f642166b88e not found: ID does not exist" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.085071 4689 scope.go:117] "RemoveContainer" containerID="312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752" Jan 23 10:54:37 crc kubenswrapper[4689]: E0123 10:54:37.085549 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752\": container with ID starting with 312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752 not found: ID does not exist" containerID="312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.085598 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752"} err="failed to get container status \"312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752\": rpc error: code = NotFound desc = could not find container \"312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752\": container with ID starting with 312b22c640fef87cf8e29f7cd7d286224f87c8534d43b0cb8b091bdf7afab752 not found: ID does not exist" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.085625 4689 scope.go:117] "RemoveContainer" containerID="d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6" Jan 23 10:54:37 crc kubenswrapper[4689]: E0123 10:54:37.087186 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6\": container with ID starting with d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6 not found: ID does not exist" containerID="d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.087209 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6"} err="failed to get container status \"d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6\": rpc error: code = NotFound desc = could not find container \"d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6\": container with ID starting with 
d207403a06d7da214fefbfb79a17b5ca71a9fc49e2925d411d523eb11e6986a6 not found: ID does not exist" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163688 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities\") pod \"00567441-00cd-4594-a8bd-93db333e1a18\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163740 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzj22\" (UniqueName: \"kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22\") pod \"00567441-00cd-4594-a8bd-93db333e1a18\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163760 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8jcs\" (UniqueName: \"kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs\") pod \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163784 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d2kj\" (UniqueName: \"kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj\") pod \"39c4a693-cb3e-49c8-9515-5b11f02093e0\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163813 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content\") pod \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163862 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities\") pod \"39c4a693-cb3e-49c8-9515-5b11f02093e0\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163887 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca\") pod \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163927 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content\") pod \"39c4a693-cb3e-49c8-9515-5b11f02093e0\" (UID: \"39c4a693-cb3e-49c8-9515-5b11f02093e0\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163942 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content\") pod \"00567441-00cd-4594-a8bd-93db333e1a18\" (UID: \"00567441-00cd-4594-a8bd-93db333e1a18\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163964 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities\") pod \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\" (UID: \"2e380655-ff7b-4f59-92ee-53074cc1b4ca\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.163991 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics\") pod \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.164018 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77vxh\" (UniqueName: \"kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh\") pod \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\" (UID: \"585f99c4-5f5e-4caf-9b9f-31a7f666bea8\") " Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.164244 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f3d6207-0bc4-441d-bf97-406ec30d09a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.164581 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "585f99c4-5f5e-4caf-9b9f-31a7f666bea8" (UID: "585f99c4-5f5e-4caf-9b9f-31a7f666bea8"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.165540 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities" (OuterVolumeSpecName: "utilities") pod "2e380655-ff7b-4f59-92ee-53074cc1b4ca" (UID: "2e380655-ff7b-4f59-92ee-53074cc1b4ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.167586 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj" (OuterVolumeSpecName: "kube-api-access-8d2kj") pod "39c4a693-cb3e-49c8-9515-5b11f02093e0" (UID: "39c4a693-cb3e-49c8-9515-5b11f02093e0"). InnerVolumeSpecName "kube-api-access-8d2kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.167664 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh" (OuterVolumeSpecName: "kube-api-access-77vxh") pod "585f99c4-5f5e-4caf-9b9f-31a7f666bea8" (UID: "585f99c4-5f5e-4caf-9b9f-31a7f666bea8"). InnerVolumeSpecName "kube-api-access-77vxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.167677 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities" (OuterVolumeSpecName: "utilities") pod "00567441-00cd-4594-a8bd-93db333e1a18" (UID: "00567441-00cd-4594-a8bd-93db333e1a18"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.167709 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs" (OuterVolumeSpecName: "kube-api-access-v8jcs") pod "2e380655-ff7b-4f59-92ee-53074cc1b4ca" (UID: "2e380655-ff7b-4f59-92ee-53074cc1b4ca"). InnerVolumeSpecName "kube-api-access-v8jcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.168184 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities" (OuterVolumeSpecName: "utilities") pod "39c4a693-cb3e-49c8-9515-5b11f02093e0" (UID: "39c4a693-cb3e-49c8-9515-5b11f02093e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.168223 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22" (OuterVolumeSpecName: "kube-api-access-jzj22") pod "00567441-00cd-4594-a8bd-93db333e1a18" (UID: "00567441-00cd-4594-a8bd-93db333e1a18"). InnerVolumeSpecName "kube-api-access-jzj22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.169936 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "585f99c4-5f5e-4caf-9b9f-31a7f666bea8" (UID: "585f99c4-5f5e-4caf-9b9f-31a7f666bea8"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.193383 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00567441-00cd-4594-a8bd-93db333e1a18" (UID: "00567441-00cd-4594-a8bd-93db333e1a18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.225374 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39c4a693-cb3e-49c8-9515-5b11f02093e0" (UID: "39c4a693-cb3e-49c8-9515-5b11f02093e0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.227370 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e380655-ff7b-4f59-92ee-53074cc1b4ca" (UID: "2e380655-ff7b-4f59-92ee-53074cc1b4ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265912 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265938 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8jcs\" (UniqueName: \"kubernetes.io/projected/2e380655-ff7b-4f59-92ee-53074cc1b4ca-kube-api-access-v8jcs\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265947 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzj22\" (UniqueName: \"kubernetes.io/projected/00567441-00cd-4594-a8bd-93db333e1a18-kube-api-access-jzj22\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265957 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d2kj\" (UniqueName: \"kubernetes.io/projected/39c4a693-cb3e-49c8-9515-5b11f02093e0-kube-api-access-8d2kj\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265966 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265974 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265982 4689 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265990 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39c4a693-cb3e-49c8-9515-5b11f02093e0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.265998 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00567441-00cd-4594-a8bd-93db333e1a18-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.266005 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e380655-ff7b-4f59-92ee-53074cc1b4ca-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.266015 4689 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.266022 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77vxh\" (UniqueName: \"kubernetes.io/projected/585f99c4-5f5e-4caf-9b9f-31a7f666bea8-kube-api-access-77vxh\") on node \"crc\" DevicePath \"\"" Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.283720 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"] Jan 23 10:54:37 crc 
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.286763 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5x86b"]
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.648703 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" path="/var/lib/kubelet/pods/3f3d6207-0bc4-441d-bf97-406ec30d09a1/volumes"
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.986570 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdzr6" event={"ID":"39c4a693-cb3e-49c8-9515-5b11f02093e0","Type":"ContainerDied","Data":"fe2845691e7ac53220aab58b9a52195236fec29ef4aeef007ba387c969df9f1c"}
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.986647 4689 scope.go:117] "RemoveContainer" containerID="30754005cc74e1172714ebdcace87dcffc582c8cb52cba88ac5e5106982b30b8"
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.986747 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdzr6"
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.992439 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mfvh2"
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.992731 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mfvh2" event={"ID":"00567441-00cd-4594-a8bd-93db333e1a18","Type":"ContainerDied","Data":"dfc3018e1d85ab93dac33dbfba98eff2df74585c401dc26253f8d2bfe52eef30"}
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.997051 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94lj8"
Jan 23 10:54:37 crc kubenswrapper[4689]: I0123 10:54:37.997279 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94lj8" event={"ID":"2e380655-ff7b-4f59-92ee-53074cc1b4ca","Type":"ContainerDied","Data":"fe774a8cf5a5a32b2eff3ef50dcaceb27906dd75dcf9ea162a76c4be8e8f0913"}
Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.000798 4689 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.000848 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-97xbl" event={"ID":"585f99c4-5f5e-4caf-9b9f-31a7f666bea8","Type":"ContainerDied","Data":"3f7139052bf5ffff520c62185a9480cd5e73c5a8c9e36464a70fe198d19b72ac"} Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.002944 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.016760 4689 scope.go:117] "RemoveContainer" containerID="c9299ca8ae4da06baaecfcf242ab1589ddb2a21f0e13ee86b36786ee4ed31ebc" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.019560 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdzr6"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.023387 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vdzr6"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.035915 4689 scope.go:117] "RemoveContainer" containerID="aec784f024161a346ae7dfc12c002d47bde5ff706792e104ca2a3d69228392c7" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.067064 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.072629 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mfvh2"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.074940 4689 scope.go:117] "RemoveContainer" containerID="b19ae6926d94f13787da82c56ac4bea84c7498228fde59e41ac700647ada6f7e" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.089761 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-94lj8"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.101291 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-94lj8"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.103933 4689 scope.go:117] "RemoveContainer" containerID="fc5c22c91d3a058788fd507405638de2d32f3e0c18f7b3116120c77eabefb36a" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.104694 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.107544 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-97xbl"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.116373 4689 scope.go:117] "RemoveContainer" containerID="fa39da2fe7c6e7fa5082051f39bc80eaca6840334eefa6caf6fa46acaa688d29" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.134002 4689 scope.go:117] "RemoveContainer" containerID="ba2e53c279f7b1262c3fc70bc1828219dd9161c285d59a0040469c0e4adfa5e8" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.150205 4689 scope.go:117] "RemoveContainer" containerID="199114f98bad9852b5e0a763b8d2e54ceb8300c0f0d4ecea21d4ce80193e2d34" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.164744 4689 scope.go:117] "RemoveContainer" containerID="bea4fa2a0212e24df053ba7bb7d573841ceb8d14ce26f577c5c105480d49e97c" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185371 4689 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-marketplace/redhat-operators-4zskl"] Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185588 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185601 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185614 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185622 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185635 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185642 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185653 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185660 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185670 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185677 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185686 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185695 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185703 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185710 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185721 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185728 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185736 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerName="marketplace-operator" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185743 4689 
state_mem.go:107] "Deleted CPUSet assignment" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerName="marketplace-operator" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185756 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185763 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185774 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185781 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="extract-utilities" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185791 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185799 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="extract-content" Jan 23 10:54:38 crc kubenswrapper[4689]: E0123 10:54:38.185808 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185815 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185937 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" containerName="marketplace-operator" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185954 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="00567441-00cd-4594-a8bd-93db333e1a18" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185965 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185979 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f3d6207-0bc4-441d-bf97-406ec30d09a1" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.185989 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" containerName="registry-server" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.186903 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.192683 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.192134 4689 scope.go:117] "RemoveContainer" containerID="3802b9f725815de07e34f94030bad4c4680e3406a31e2434d8d604d5ab9e6d08" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.195306 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4zskl"] Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.278777 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4v92\" (UniqueName: \"kubernetes.io/projected/0cb158e3-50d7-4750-8f95-c22d0a94a70f-kube-api-access-c4v92\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.278948 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-utilities\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.278996 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-catalog-content\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.380585 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-utilities\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.380646 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-catalog-content\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.380733 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4v92\" (UniqueName: \"kubernetes.io/projected/0cb158e3-50d7-4750-8f95-c22d0a94a70f-kube-api-access-c4v92\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.381070 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-utilities\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.381378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/0cb158e3-50d7-4750-8f95-c22d0a94a70f-catalog-content\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.399081 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4v92\" (UniqueName: \"kubernetes.io/projected/0cb158e3-50d7-4750-8f95-c22d0a94a70f-kube-api-access-c4v92\") pod \"redhat-operators-4zskl\" (UID: \"0cb158e3-50d7-4750-8f95-c22d0a94a70f\") " pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.514280 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 10:54:38 crc kubenswrapper[4689]: I0123 10:54:38.943076 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4zskl"] Jan 23 10:54:38 crc kubenswrapper[4689]: W0123 10:54:38.953862 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cb158e3_50d7_4750_8f95_c22d0a94a70f.slice/crio-c99dec40a44ee404c9870f36500f222be57725b93e0ef5e3786406c40a722b97 WatchSource:0}: Error finding container c99dec40a44ee404c9870f36500f222be57725b93e0ef5e3786406c40a722b97: Status 404 returned error can't find the container with id c99dec40a44ee404c9870f36500f222be57725b93e0ef5e3786406c40a722b97 Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.006506 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerStarted","Data":"c99dec40a44ee404c9870f36500f222be57725b93e0ef5e3786406c40a722b97"} Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.647374 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00567441-00cd-4594-a8bd-93db333e1a18" path="/var/lib/kubelet/pods/00567441-00cd-4594-a8bd-93db333e1a18/volumes" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.648233 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e380655-ff7b-4f59-92ee-53074cc1b4ca" path="/var/lib/kubelet/pods/2e380655-ff7b-4f59-92ee-53074cc1b4ca/volumes" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.648922 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39c4a693-cb3e-49c8-9515-5b11f02093e0" path="/var/lib/kubelet/pods/39c4a693-cb3e-49c8-9515-5b11f02093e0/volumes" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.650195 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="585f99c4-5f5e-4caf-9b9f-31a7f666bea8" path="/var/lib/kubelet/pods/585f99c4-5f5e-4caf-9b9f-31a7f666bea8/volumes" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.985254 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4hdbj"] Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.987187 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.990172 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 23 10:54:39 crc kubenswrapper[4689]: I0123 10:54:39.991204 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4hdbj"] Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.018481 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerID="af1b1fa5482c1266dfaa7db907aa454af571c15d1f336a3e083d37015e8ad209" exitCode=0 Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.018534 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerDied","Data":"af1b1fa5482c1266dfaa7db907aa454af571c15d1f336a3e083d37015e8ad209"} Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.102714 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-utilities\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.102758 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hp7h\" (UniqueName: \"kubernetes.io/projected/b7c32de2-03fb-4b12-8fdf-69161c24eed2-kube-api-access-2hp7h\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.102794 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-catalog-content\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.203763 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-utilities\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.204158 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hp7h\" (UniqueName: \"kubernetes.io/projected/b7c32de2-03fb-4b12-8fdf-69161c24eed2-kube-api-access-2hp7h\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.204218 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-catalog-content\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.204556 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-utilities\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.204858 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7c32de2-03fb-4b12-8fdf-69161c24eed2-catalog-content\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.223451 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hp7h\" (UniqueName: \"kubernetes.io/projected/b7c32de2-03fb-4b12-8fdf-69161c24eed2-kube-api-access-2hp7h\") pod \"community-operators-4hdbj\" (UID: \"b7c32de2-03fb-4b12-8fdf-69161c24eed2\") " pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.309867 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.582846 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x87gr"] Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.584606 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.587001 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.593052 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x87gr"] Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.699267 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4hdbj"] Jan 23 10:54:40 crc kubenswrapper[4689]: W0123 10:54:40.703649 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7c32de2_03fb_4b12_8fdf_69161c24eed2.slice/crio-e5ffa366cb808f7e60aa902213d508081752fd4b267c1356c3c2904effe40213 WatchSource:0}: Error finding container e5ffa366cb808f7e60aa902213d508081752fd4b267c1356c3c2904effe40213: Status 404 returned error can't find the container with id e5ffa366cb808f7e60aa902213d508081752fd4b267c1356c3c2904effe40213 Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.711374 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-utilities\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.712138 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g4t4\" (UniqueName: \"kubernetes.io/projected/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-kube-api-access-7g4t4\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:54:40 
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.813182 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g4t4\" (UniqueName: \"kubernetes.io/projected/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-kube-api-access-7g4t4\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.813232 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-catalog-content\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.813271 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-utilities\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.813707 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-utilities\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.813874 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-catalog-content\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.831788 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g4t4\" (UniqueName: \"kubernetes.io/projected/5fbbf7f9-c268-4a7b-a278-4f72a9099acf-kube-api-access-7g4t4\") pod \"certified-operators-x87gr\" (UID: \"5fbbf7f9-c268-4a7b-a278-4f72a9099acf\") " pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:40 crc kubenswrapper[4689]: I0123 10:54:40.912405 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 10:54:41 crc kubenswrapper[4689]: I0123 10:54:41.023942 4689 generic.go:334] "Generic (PLEG): container finished" podID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerID="e58f2b1f426b617dbed9f2b2eab56ad7cc5a6a7985e61a5d4875ed3072cd0d59" exitCode=0
Jan 23 10:54:41 crc kubenswrapper[4689]: I0123 10:54:41.023992 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerDied","Data":"e58f2b1f426b617dbed9f2b2eab56ad7cc5a6a7985e61a5d4875ed3072cd0d59"}
Jan 23 10:54:41 crc kubenswrapper[4689]: I0123 10:54:41.024022 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerStarted","Data":"e5ffa366cb808f7e60aa902213d508081752fd4b267c1356c3c2904effe40213"}
Jan 23 10:54:41 crc kubenswrapper[4689]: I0123 10:54:41.331023 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" containerName="registry" containerID="cri-o://df0966327f05d6988e46dfb2c36a75b123792e5dc23bc9a146c847548a606c24" gracePeriod=30
Jan 23 10:54:41 crc kubenswrapper[4689]: I0123 10:54:41.368262 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x87gr"]
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.032266 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerID="7ae6245d5b4435031b1763eb4e29e9024cdb3b0135e7930997b357b8ccc05945" exitCode=0
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.032332 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerDied","Data":"7ae6245d5b4435031b1763eb4e29e9024cdb3b0135e7930997b357b8ccc05945"}
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.035249 4689 generic.go:334] "Generic (PLEG): container finished" podID="9abf0120-b5d1-4f43-871d-b73a24382940" containerID="df0966327f05d6988e46dfb2c36a75b123792e5dc23bc9a146c847548a606c24" exitCode=0
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.035297 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" event={"ID":"9abf0120-b5d1-4f43-871d-b73a24382940","Type":"ContainerDied","Data":"df0966327f05d6988e46dfb2c36a75b123792e5dc23bc9a146c847548a606c24"}
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.037526 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerStarted","Data":"3d0c0be0ff79df75fb5f893d2d4fcd3060afa28583a5e2e9aef92a07da02eaa6"}
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.163675 4689 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-sbk9v container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.33:5000/healthz\": dial tcp 10.217.0.33:5000: connect: connection refused" start-of-body=
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.163737 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.33:5000/healthz\": dial tcp 10.217.0.33:5000: connect: connection refused"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.396007 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h6w2m"]
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.397285 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.400443 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6w2m"]
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.432327 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.542838 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-utilities\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.542995 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdmxd\" (UniqueName: \"kubernetes.io/projected/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-kube-api-access-wdmxd\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.543032 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-catalog-content\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.643915 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-catalog-content\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.643983 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-utilities\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.644059 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdmxd\" (UniqueName: \"kubernetes.io/projected/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-kube-api-access-wdmxd\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.644775 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-utilities\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.644821 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-catalog-content\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.669706 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdmxd\" (UniqueName: \"kubernetes.io/projected/5f0a7d6b-1743-49ab-9f0b-2742ce992ecf-kube-api-access-wdmxd\") pod \"redhat-marketplace-h6w2m\" (UID: \"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf\") " pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.747343 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:42 crc kubenswrapper[4689]: I0123 10:54:42.882209 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.044163 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerStarted","Data":"0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620"}
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.047532 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v"
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.047597 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sbk9v" event={"ID":"9abf0120-b5d1-4f43-871d-b73a24382940","Type":"ContainerDied","Data":"45cebe81ea273c37702d1d66549c7e5b90e171c0057babb6427f85bc3667b2b3"}
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.047648 4689 scope.go:117] "RemoveContainer" containerID="df0966327f05d6988e46dfb2c36a75b123792e5dc23bc9a146c847548a606c24"
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.048786 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.048843 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.048920 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.048964 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.048990 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5v8k\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.049012 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.049086 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.049218 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"9abf0120-b5d1-4f43-871d-b73a24382940\" (UID: \"9abf0120-b5d1-4f43-871d-b73a24382940\") "
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.050430 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.051393 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.055599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k" (OuterVolumeSpecName: "kube-api-access-n5v8k") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "kube-api-access-n5v8k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.055926 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.055999 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.056225 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerStarted","Data":"d828d49d0a544fc6e9a82621c5fca0c1ff12db874df305d08a59a2c5753d3b9d"}
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.057797 4689 generic.go:334] "Generic (PLEG): container finished" podID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerID="11e2b136672d61575bec4f7874358f3321d447163f8de34a714484cfb9fe21a9" exitCode=0
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.057832 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerDied","Data":"11e2b136672d61575bec4f7874358f3321d447163f8de34a714484cfb9fe21a9"}
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.058497 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.062930 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.067198 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "9abf0120-b5d1-4f43-871d-b73a24382940" (UID: "9abf0120-b5d1-4f43-871d-b73a24382940"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.071820 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4zskl" podStartSLOduration=2.400154562 podStartE2EDuration="5.071780494s" podCreationTimestamp="2026-01-23 10:54:38 +0000 UTC" firstStartedPulling="2026-01-23 10:54:40.020515352 +0000 UTC m=+344.645195211" lastFinishedPulling="2026-01-23 10:54:42.692141284 +0000 UTC m=+347.316821143" observedRunningTime="2026-01-23 10:54:43.063329905 +0000 UTC m=+347.688009764" watchObservedRunningTime="2026-01-23 10:54:43.071780494 +0000 UTC m=+347.696460343"
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.141492 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6w2m"]
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150607 4689 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150632 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5v8k\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-kube-api-access-n5v8k\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150641 4689 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9abf0120-b5d1-4f43-871d-b73a24382940-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150650 4689 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9abf0120-b5d1-4f43-871d-b73a24382940-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150658 4689 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150666 4689 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9abf0120-b5d1-4f43-871d-b73a24382940-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.150674 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9abf0120-b5d1-4f43-871d-b73a24382940-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.383581 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"]
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.397514 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sbk9v"]
Jan 23 10:54:43 crc kubenswrapper[4689]: I0123 10:54:43.645518 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" path="/var/lib/kubelet/pods/9abf0120-b5d1-4f43-871d-b73a24382940/volumes"
Jan 23 10:54:44 crc kubenswrapper[4689]: I0123 10:54:44.063903 4689 generic.go:334] "Generic (PLEG): container finished" podID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerID="d828d49d0a544fc6e9a82621c5fca0c1ff12db874df305d08a59a2c5753d3b9d" exitCode=0
Jan 23 10:54:44 crc kubenswrapper[4689]: I0123 10:54:44.063985 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerDied","Data":"d828d49d0a544fc6e9a82621c5fca0c1ff12db874df305d08a59a2c5753d3b9d"}
Jan 23 10:54:44 crc kubenswrapper[4689]: I0123 10:54:44.065716 4689 generic.go:334] "Generic (PLEG): container finished" podID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerID="a938780c0ac7fbdb83931d160703a5d37324178fc2aa6fedcb004f13e4d7490f" exitCode=0
Jan 23 10:54:44 crc kubenswrapper[4689]: I0123 10:54:44.066278 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerDied","Data":"a938780c0ac7fbdb83931d160703a5d37324178fc2aa6fedcb004f13e4d7490f"}
Jan 23 10:54:44 crc kubenswrapper[4689]: I0123 10:54:44.066323 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerStarted","Data":"8684bf5c6b073a8982e0419cca7adfcdd22e978d6253b2a1add420a866654fa7"}
Jan 23 10:54:48 crc kubenswrapper[4689]: I0123 10:54:48.514664 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4zskl"
Jan 23 10:54:48 crc kubenswrapper[4689]: I0123 10:54:48.515015 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4zskl"
Jan 23 10:54:48 crc kubenswrapper[4689]: I0123 10:54:48.574123 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4zskl"
Jan 23 10:54:49 crc kubenswrapper[4689]: I0123 10:54:49.090325 4689 generic.go:334] "Generic (PLEG): container finished" podID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerID="cd24a885bd2cebbe1df2f574159aad12b2afbb8dd20e91e9c75b71b91b707dca" exitCode=0
Jan 23 10:54:49 crc kubenswrapper[4689]: I0123 10:54:49.091102 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerDied","Data":"cd24a885bd2cebbe1df2f574159aad12b2afbb8dd20e91e9c75b71b91b707dca"}
Jan 23 10:54:49 crc kubenswrapper[4689]: I0123 10:54:49.140706 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4zskl"
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.112507 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerStarted","Data":"0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988"}
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.116881 4689 generic.go:334] "Generic (PLEG): container finished" podID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerID="bfced869e9d1bdba2c2ddf11901b983b563cc88830bcc07af5af9f8771271949" exitCode=0
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.117003 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerDied","Data":"bfced869e9d1bdba2c2ddf11901b983b563cc88830bcc07af5af9f8771271949"}
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.121090 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerStarted","Data":"6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892"}
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.140138 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4hdbj" podStartSLOduration=3.705189753 podStartE2EDuration="12.140123098s" podCreationTimestamp="2026-01-23 10:54:39 +0000 UTC" firstStartedPulling="2026-01-23 10:54:41.025466228 +0000 UTC m=+345.650146087" lastFinishedPulling="2026-01-23 10:54:49.460399573 +0000 UTC m=+354.085079432" observedRunningTime="2026-01-23 10:54:51.13830278 +0000 UTC m=+355.762982649" watchObservedRunningTime="2026-01-23 10:54:51.140123098 +0000 UTC m=+355.764802957"
Jan 23 10:54:51 crc kubenswrapper[4689]: I0123 10:54:51.155812 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h6w2m" podStartSLOduration=2.506971094 podStartE2EDuration="9.155790795s" podCreationTimestamp="2026-01-23 10:54:42 +0000 UTC" firstStartedPulling="2026-01-23 10:54:44.067602433 +0000 UTC m=+348.692282292" lastFinishedPulling="2026-01-23 10:54:50.716422134 +0000 UTC m=+355.341101993" observedRunningTime="2026-01-23 10:54:51.155778854 +0000 UTC m=+355.780458713" watchObservedRunningTime="2026-01-23 10:54:51.155790795 +0000 UTC m=+355.780470654"
Jan 23 10:54:52 crc kubenswrapper[4689]: I0123 10:54:52.747847 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:52 crc kubenswrapper[4689]: I0123 10:54:52.747892 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:54:52 crc kubenswrapper[4689]: I0123 10:54:52.792550 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h6w2m"
Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.168400 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerStarted","Data":"ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb"}
Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.188555 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x87gr" podStartSLOduration=3.398728688 podStartE2EDuration="20.188534161s" podCreationTimestamp="2026-01-23 10:54:40 +0000 UTC" firstStartedPulling="2026-01-23 10:54:43.062615687 +0000 UTC m=+347.687295546" lastFinishedPulling="2026-01-23 10:54:59.85242114 +0000 UTC m=+364.477101019" observedRunningTime="2026-01-23 10:55:00.186469718 +0000 UTC m=+364.811149587" watchObservedRunningTime="2026-01-23 10:55:00.188534161 +0000 UTC m=+364.813214020"
duration" pod="openshift-marketplace/certified-operators-x87gr" podStartSLOduration=3.398728688 podStartE2EDuration="20.188534161s" podCreationTimestamp="2026-01-23 10:54:40 +0000 UTC" firstStartedPulling="2026-01-23 10:54:43.062615687 +0000 UTC m=+347.687295546" lastFinishedPulling="2026-01-23 10:54:59.85242114 +0000 UTC m=+364.477101019" observedRunningTime="2026-01-23 10:55:00.186469718 +0000 UTC m=+364.811149587" watchObservedRunningTime="2026-01-23 10:55:00.188534161 +0000 UTC m=+364.813214020" Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.309996 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.310073 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.349865 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.912897 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:55:00 crc kubenswrapper[4689]: I0123 10:55:00.913221 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:55:01 crc kubenswrapper[4689]: I0123 10:55:01.215111 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 10:55:01 crc kubenswrapper[4689]: I0123 10:55:01.965745 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 10:55:01 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 10:55:01 crc kubenswrapper[4689]: > Jan 23 10:55:02 crc kubenswrapper[4689]: I0123 10:55:02.788428 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 10:55:03 crc kubenswrapper[4689]: I0123 10:55:03.311171 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:55:03 crc kubenswrapper[4689]: I0123 10:55:03.311246 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.038822 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr"] Jan 23 10:55:08 crc kubenswrapper[4689]: E0123 10:55:08.039057 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" containerName="registry" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.039073 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" 
containerName="registry" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.039199 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abf0120-b5d1-4f43-871d-b73a24382940" containerName="registry" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.039624 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.041533 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.041563 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.041822 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.042055 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.042551 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.051532 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr"] Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.166798 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.167010 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.167139 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwz5g\" (UniqueName: \"kubernetes.io/projected/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-kube-api-access-cwz5g\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.268549 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.268613 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.268645 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwz5g\" (UniqueName: \"kubernetes.io/projected/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-kube-api-access-cwz5g\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.270036 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.277794 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.292991 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwz5g\" (UniqueName: \"kubernetes.io/projected/a6d5bbd1-6663-420f-8aaa-ea2e12507c22-kube-api-access-cwz5g\") pod \"cluster-monitoring-operator-6d5b84845-6gzgr\" (UID: \"a6d5bbd1-6663-420f-8aaa-ea2e12507c22\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.357290 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" Jan 23 10:55:08 crc kubenswrapper[4689]: I0123 10:55:08.790703 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr"] Jan 23 10:55:08 crc kubenswrapper[4689]: W0123 10:55:08.794824 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6d5bbd1_6663_420f_8aaa_ea2e12507c22.slice/crio-35fe4c83b4bbdd8f90ebf2552f343947b2deb76a58855a34ab8b5423e8e4578c WatchSource:0}: Error finding container 35fe4c83b4bbdd8f90ebf2552f343947b2deb76a58855a34ab8b5423e8e4578c: Status 404 returned error can't find the container with id 35fe4c83b4bbdd8f90ebf2552f343947b2deb76a58855a34ab8b5423e8e4578c Jan 23 10:55:09 crc kubenswrapper[4689]: I0123 10:55:09.225544 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" event={"ID":"a6d5bbd1-6663-420f-8aaa-ea2e12507c22","Type":"ContainerStarted","Data":"35fe4c83b4bbdd8f90ebf2552f343947b2deb76a58855a34ab8b5423e8e4578c"} Jan 23 10:55:10 crc kubenswrapper[4689]: I0123 10:55:10.949854 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:55:10 crc kubenswrapper[4689]: I0123 10:55:10.996221 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.740087 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq"] Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.741242 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.742849 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-qhzmg" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.743349 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.759985 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq"] Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.822525 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/e7dd97d0-dc97-4bfa-9048-a065d03576ad-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-flfwq\" (UID: \"e7dd97d0-dc97-4bfa-9048-a065d03576ad\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.923623 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/e7dd97d0-dc97-4bfa-9048-a065d03576ad-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-flfwq\" (UID: \"e7dd97d0-dc97-4bfa-9048-a065d03576ad\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:12 crc kubenswrapper[4689]: I0123 10:55:12.934818 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/e7dd97d0-dc97-4bfa-9048-a065d03576ad-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-flfwq\" (UID: \"e7dd97d0-dc97-4bfa-9048-a065d03576ad\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:13 crc kubenswrapper[4689]: I0123 10:55:13.057329 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:13 crc kubenswrapper[4689]: I0123 10:55:13.295844 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" event={"ID":"a6d5bbd1-6663-420f-8aaa-ea2e12507c22","Type":"ContainerStarted","Data":"eb5fc5d792530ecb3bad1742a80c1348ac5a9b00b23675c4574d93cd126e9a63"} Jan 23 10:55:13 crc kubenswrapper[4689]: I0123 10:55:13.316688 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6gzgr" podStartSLOduration=2.130699926 podStartE2EDuration="5.316671333s" podCreationTimestamp="2026-01-23 10:55:08 +0000 UTC" firstStartedPulling="2026-01-23 10:55:08.797695978 +0000 UTC m=+373.422375847" lastFinishedPulling="2026-01-23 10:55:11.983667395 +0000 UTC m=+376.608347254" observedRunningTime="2026-01-23 10:55:13.310562504 +0000 UTC m=+377.935242373" watchObservedRunningTime="2026-01-23 10:55:13.316671333 +0000 UTC m=+377.941351192" Jan 23 10:55:13 crc kubenswrapper[4689]: I0123 10:55:13.432957 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq"] Jan 23 10:55:14 crc kubenswrapper[4689]: I0123 10:55:14.302881 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" event={"ID":"e7dd97d0-dc97-4bfa-9048-a065d03576ad","Type":"ContainerStarted","Data":"c53de5dac1eab91aed92b8a3130d820bfaa68b45be0a915779328a506cabf8ad"} Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.326617 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" event={"ID":"e7dd97d0-dc97-4bfa-9048-a065d03576ad","Type":"ContainerStarted","Data":"a395e68b08186aa0babb4b1e0bcaf50412e72b1a503e7f53fee931e31e9a89e5"} Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.327262 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.336908 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.356737 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podStartSLOduration=2.13590798 podStartE2EDuration="4.356711632s" podCreationTimestamp="2026-01-23 10:55:12 +0000 UTC" firstStartedPulling="2026-01-23 10:55:13.438514005 +0000 UTC m=+378.063193864" lastFinishedPulling="2026-01-23 10:55:15.659317657 +0000 UTC m=+380.283997516" observedRunningTime="2026-01-23 10:55:16.349707341 +0000 UTC m=+380.974387210" watchObservedRunningTime="2026-01-23 10:55:16.356711632 +0000 UTC m=+380.981391531" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.828522 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-nzc9w"] Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.830179 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.835358 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-p2p6s" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.835421 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.835617 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.852672 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.856610 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-nzc9w"] Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.936841 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.937108 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/638adf9e-8c03-4c7a-8cb6-1030d64b369c-metrics-client-ca\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.937187 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7vh5\" (UniqueName: \"kubernetes.io/projected/638adf9e-8c03-4c7a-8cb6-1030d64b369c-kube-api-access-q7vh5\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:16 crc kubenswrapper[4689]: I0123 10:55:16.937228 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.037981 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.038047 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/638adf9e-8c03-4c7a-8cb6-1030d64b369c-metrics-client-ca\") pod 
\"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.038081 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7vh5\" (UniqueName: \"kubernetes.io/projected/638adf9e-8c03-4c7a-8cb6-1030d64b369c-kube-api-access-q7vh5\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.038113 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: E0123 10:55:17.038189 4689 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-tls: secret "prometheus-operator-tls" not found Jan 23 10:55:17 crc kubenswrapper[4689]: E0123 10:55:17.038280 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls podName:638adf9e-8c03-4c7a-8cb6-1030d64b369c nodeName:}" failed. No retries permitted until 2026-01-23 10:55:17.538259037 +0000 UTC m=+382.162938896 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "prometheus-operator-tls" (UniqueName: "kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls") pod "prometheus-operator-db54df47d-nzc9w" (UID: "638adf9e-8c03-4c7a-8cb6-1030d64b369c") : secret "prometheus-operator-tls" not found Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.039040 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/638adf9e-8c03-4c7a-8cb6-1030d64b369c-metrics-client-ca\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.043813 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.066462 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7vh5\" (UniqueName: \"kubernetes.io/projected/638adf9e-8c03-4c7a-8cb6-1030d64b369c-kube-api-access-q7vh5\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.544237 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: 
\"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.547324 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/638adf9e-8c03-4c7a-8cb6-1030d64b369c-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-nzc9w\" (UID: \"638adf9e-8c03-4c7a-8cb6-1030d64b369c\") " pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:17 crc kubenswrapper[4689]: I0123 10:55:17.746595 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" Jan 23 10:55:18 crc kubenswrapper[4689]: I0123 10:55:18.223229 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-nzc9w"] Jan 23 10:55:18 crc kubenswrapper[4689]: W0123 10:55:18.229545 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod638adf9e_8c03_4c7a_8cb6_1030d64b369c.slice/crio-cf16b4f001d11a036534460e6da0bd510585d1ad437f632fd6fc7be4f4e31836 WatchSource:0}: Error finding container cf16b4f001d11a036534460e6da0bd510585d1ad437f632fd6fc7be4f4e31836: Status 404 returned error can't find the container with id cf16b4f001d11a036534460e6da0bd510585d1ad437f632fd6fc7be4f4e31836 Jan 23 10:55:18 crc kubenswrapper[4689]: I0123 10:55:18.337110 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" event={"ID":"638adf9e-8c03-4c7a-8cb6-1030d64b369c","Type":"ContainerStarted","Data":"cf16b4f001d11a036534460e6da0bd510585d1ad437f632fd6fc7be4f4e31836"} Jan 23 10:55:21 crc kubenswrapper[4689]: I0123 10:55:21.357194 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" event={"ID":"638adf9e-8c03-4c7a-8cb6-1030d64b369c","Type":"ContainerStarted","Data":"3cdec6ecb9e9b5507dc251997c4f0d1fe45d3e58e25a78a8ea0847b0a793e6d9"} Jan 23 10:55:22 crc kubenswrapper[4689]: I0123 10:55:22.364080 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" event={"ID":"638adf9e-8c03-4c7a-8cb6-1030d64b369c","Type":"ContainerStarted","Data":"57afd5443e3811bc258373c75bfa98bc20eaa6c6f56dda06c5235947cdbb6b93"} Jan 23 10:55:22 crc kubenswrapper[4689]: I0123 10:55:22.384057 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-nzc9w" podStartSLOduration=3.558586462 podStartE2EDuration="6.384011984s" podCreationTimestamp="2026-01-23 10:55:16 +0000 UTC" firstStartedPulling="2026-01-23 10:55:18.230869821 +0000 UTC m=+382.855549690" lastFinishedPulling="2026-01-23 10:55:21.056295343 +0000 UTC m=+385.680975212" observedRunningTime="2026-01-23 10:55:22.379415124 +0000 UTC m=+387.004094993" watchObservedRunningTime="2026-01-23 10:55:22.384011984 +0000 UTC m=+387.008691843" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.155453 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-848p7"] Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.156427 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.158865 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-s58zf" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.159178 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.160333 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.170365 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-848p7"] Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.191237 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-gdtsq"] Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.192223 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: W0123 10:55:24.193962 4689 reflector.go:561] object-"openshift-monitoring"/"node-exporter-tls": failed to list *v1.Secret: secrets "node-exporter-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-monitoring": no relationship found between node 'crc' and this object Jan 23 10:55:24 crc kubenswrapper[4689]: E0123 10:55:24.194009 4689 reflector.go:158] "Unhandled Error" err="object-\"openshift-monitoring\"/\"node-exporter-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"node-exporter-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-monitoring\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 23 10:55:24 crc kubenswrapper[4689]: W0123 10:55:24.193942 4689 reflector.go:561] object-"openshift-monitoring"/"node-exporter-dockercfg-f9fgm": failed to list *v1.Secret: secrets "node-exporter-dockercfg-f9fgm" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-monitoring": no relationship found between node 'crc' and this object Jan 23 10:55:24 crc kubenswrapper[4689]: E0123 10:55:24.194050 4689 reflector.go:158] "Unhandled Error" err="object-\"openshift-monitoring\"/\"node-exporter-dockercfg-f9fgm\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"node-exporter-dockercfg-f9fgm\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-monitoring\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.195049 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.222043 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt"] Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.223086 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.226296 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-5fnbt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.226673 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.227454 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.228197 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.240433 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt"] Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251079 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251121 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6tdg\" (UniqueName: \"kubernetes.io/projected/37ac445c-20e3-495d-b0da-9d7134193acd-kube-api-access-r6tdg\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251177 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251206 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251233 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251261 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251287 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-textfile\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251311 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251342 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-tls\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251376 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp5lj\" (UniqueName: \"kubernetes.io/projected/6790396c-25fc-40f8-8c0f-74bd6b73affd-kube-api-access-tp5lj\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251403 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d5kf\" (UniqueName: \"kubernetes.io/projected/cf8ee507-0a12-41eb-bef3-021e1bc74293-kube-api-access-5d5kf\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251428 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6790396c-25fc-40f8-8c0f-74bd6b73affd-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251463 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-sys\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251481 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: 
\"kubernetes.io/configmap/cf8ee507-0a12-41eb-bef3-021e1bc74293-metrics-client-ca\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251499 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-wtmp\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251524 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251540 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-root\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.251561 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/37ac445c-20e3-495d-b0da-9d7134193acd-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352461 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352507 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6tdg\" (UniqueName: \"kubernetes.io/projected/37ac445c-20e3-495d-b0da-9d7134193acd-kube-api-access-r6tdg\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352537 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352555 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-custom-resource-state-configmap\") pod 
\"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352578 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352600 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352617 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-textfile\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352635 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352655 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-tls\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352674 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp5lj\" (UniqueName: \"kubernetes.io/projected/6790396c-25fc-40f8-8c0f-74bd6b73affd-kube-api-access-tp5lj\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352691 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d5kf\" (UniqueName: \"kubernetes.io/projected/cf8ee507-0a12-41eb-bef3-021e1bc74293-kube-api-access-5d5kf\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352705 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6790396c-25fc-40f8-8c0f-74bd6b73affd-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " 
pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352728 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-sys\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352742 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cf8ee507-0a12-41eb-bef3-021e1bc74293-metrics-client-ca\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352761 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-wtmp\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352781 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352796 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-root\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.352810 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/37ac445c-20e3-495d-b0da-9d7134193acd-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.353251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/37ac445c-20e3-495d-b0da-9d7134193acd-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.353591 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-sys\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.353977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-root\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " 
pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.353994 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cf8ee507-0a12-41eb-bef3-021e1bc74293-metrics-client-ca\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.354276 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-wtmp\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.354355 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6790396c-25fc-40f8-8c0f-74bd6b73affd-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.354584 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-textfile\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.355012 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.356802 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.359109 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.359110 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.359396 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" 
(UniqueName: \"kubernetes.io/secret/6790396c-25fc-40f8-8c0f-74bd6b73affd-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.359865 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.369605 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp5lj\" (UniqueName: \"kubernetes.io/projected/6790396c-25fc-40f8-8c0f-74bd6b73affd-kube-api-access-tp5lj\") pod \"openshift-state-metrics-566fddb674-848p7\" (UID: \"6790396c-25fc-40f8-8c0f-74bd6b73affd\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.369605 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6tdg\" (UniqueName: \"kubernetes.io/projected/37ac445c-20e3-495d-b0da-9d7134193acd-kube-api-access-r6tdg\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.371468 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/37ac445c-20e3-495d-b0da-9d7134193acd-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-5b7rt\" (UID: \"37ac445c-20e3-495d-b0da-9d7134193acd\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.371583 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d5kf\" (UniqueName: \"kubernetes.io/projected/cf8ee507-0a12-41eb-bef3-021e1bc74293-kube-api-access-5d5kf\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.474421 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.535494 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.776349 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt"] Jan 23 10:55:24 crc kubenswrapper[4689]: W0123 10:55:24.782879 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37ac445c_20e3_495d_b0da_9d7134193acd.slice/crio-c1f49995f6d91dcd4855d614f223ef77f554dee01f841a4e6bd8569338e27032 WatchSource:0}: Error finding container c1f49995f6d91dcd4855d614f223ef77f554dee01f841a4e6bd8569338e27032: Status 404 returned error can't find the container with id c1f49995f6d91dcd4855d614f223ef77f554dee01f841a4e6bd8569338e27032 Jan 23 10:55:24 crc kubenswrapper[4689]: I0123 10:55:24.917601 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-848p7"] Jan 23 10:55:24 crc kubenswrapper[4689]: W0123 10:55:24.921790 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6790396c_25fc_40f8_8c0f_74bd6b73affd.slice/crio-df5c3ef48f4ca93fd4156eb1390a12a16d3fe58b2aca59ba48b33f125bb1a57e WatchSource:0}: Error finding container df5c3ef48f4ca93fd4156eb1390a12a16d3fe58b2aca59ba48b33f125bb1a57e: Status 404 returned error can't find the container with id df5c3ef48f4ca93fd4156eb1390a12a16d3fe58b2aca59ba48b33f125bb1a57e Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.131576 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.140728 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/cf8ee507-0a12-41eb-bef3-021e1bc74293-node-exporter-tls\") pod \"node-exporter-gdtsq\" (UID: \"cf8ee507-0a12-41eb-bef3-021e1bc74293\") " pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.220613 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.222302 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.225316 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.225827 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-n5ppr" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.225864 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.226069 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.226245 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.226485 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.226605 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.230129 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.231478 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.245119 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.264800 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttqck\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-kube-api-access-ttqck\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.264849 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-config-volume\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.264937 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.264987 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " 
pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265062 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-config-out\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265114 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265186 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265230 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-web-config\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265266 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265304 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.265328 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-tls-assets\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.366833 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-web-config\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.366883 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.366929 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.366962 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-tls-assets\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.366996 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttqck\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-kube-api-access-ttqck\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367041 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-config-volume\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367069 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367091 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367134 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367180 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" 
(UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-config-out\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367207 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367241 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.367848 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.368253 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.368496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af850428-9296-48fc-a2f7-9215d947f943-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.370520 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-config-volume\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.372871 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.372921 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-tls-assets\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.373082 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" 
(UniqueName: \"kubernetes.io/empty-dir/af850428-9296-48fc-a2f7-9215d947f943-config-out\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.373122 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-web-config\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.373502 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.375437 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.375518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/af850428-9296-48fc-a2f7-9215d947f943-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.385367 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttqck\" (UniqueName: \"kubernetes.io/projected/af850428-9296-48fc-a2f7-9215d947f943-kube-api-access-ttqck\") pod \"alertmanager-main-0\" (UID: \"af850428-9296-48fc-a2f7-9215d947f943\") " pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.397485 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" event={"ID":"6790396c-25fc-40f8-8c0f-74bd6b73affd","Type":"ContainerStarted","Data":"2a95ef42f527e04cfa64209f34a28779a12b161c55745601e3d5126301c23885"} Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.397540 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" event={"ID":"6790396c-25fc-40f8-8c0f-74bd6b73affd","Type":"ContainerStarted","Data":"df5c3ef48f4ca93fd4156eb1390a12a16d3fe58b2aca59ba48b33f125bb1a57e"} Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.398570 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" event={"ID":"37ac445c-20e3-495d-b0da-9d7134193acd","Type":"ContainerStarted","Data":"c1f49995f6d91dcd4855d614f223ef77f554dee01f841a4e6bd8569338e27032"} Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.455899 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-f9fgm" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.456080 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-gdtsq" Jan 23 10:55:25 crc kubenswrapper[4689]: W0123 10:55:25.474140 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf8ee507_0a12_41eb_bef3_021e1bc74293.slice/crio-c8e5b9382e30a7aa8659c0ad9f79b2573132b3b2805db23e39723322174bac5f WatchSource:0}: Error finding container c8e5b9382e30a7aa8659c0ad9f79b2573132b3b2805db23e39723322174bac5f: Status 404 returned error can't find the container with id c8e5b9382e30a7aa8659c0ad9f79b2573132b3b2805db23e39723322174bac5f Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.540089 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 23 10:55:25 crc kubenswrapper[4689]: I0123 10:55:25.934804 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 23 10:55:25 crc kubenswrapper[4689]: W0123 10:55:25.959660 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf850428_9296_48fc_a2f7_9215d947f943.slice/crio-b9d93bdac7862e66ca195c3c7192589432bef9280c5702f70ff8766f4ee586ab WatchSource:0}: Error finding container b9d93bdac7862e66ca195c3c7192589432bef9280c5702f70ff8766f4ee586ab: Status 404 returned error can't find the container with id b9d93bdac7862e66ca195c3c7192589432bef9280c5702f70ff8766f4ee586ab Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.124802 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-85b45566f7-bqf9r"] Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.126731 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.133646 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.133722 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.133775 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.133948 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-1g38e8q0sormr" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.134720 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.135354 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-rqnjb" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.135552 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.142564 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-85b45566f7-bqf9r"] Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.184243 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-grpc-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.184407 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.184942 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.184984 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v52pn\" (UniqueName: \"kubernetes.io/projected/9875c931-d946-41e1-8f23-89946abc0978-kube-api-access-v52pn\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.185007 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.185046 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/9875c931-d946-41e1-8f23-89946abc0978-metrics-client-ca\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.185072 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.185384 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.286454 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-grpc-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.286513 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.286574 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.286915 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v52pn\" (UniqueName: \"kubernetes.io/projected/9875c931-d946-41e1-8f23-89946abc0978-kube-api-access-v52pn\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.287413 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.287436 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/9875c931-d946-41e1-8f23-89946abc0978-metrics-client-ca\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.287463 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.287513 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.288607 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/9875c931-d946-41e1-8f23-89946abc0978-metrics-client-ca\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.296753 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.296883 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.299664 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.300190 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: 
\"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.302312 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.304181 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/9875c931-d946-41e1-8f23-89946abc0978-secret-grpc-tls\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.304830 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v52pn\" (UniqueName: \"kubernetes.io/projected/9875c931-d946-41e1-8f23-89946abc0978-kube-api-access-v52pn\") pod \"thanos-querier-85b45566f7-bqf9r\" (UID: \"9875c931-d946-41e1-8f23-89946abc0978\") " pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.406065 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" event={"ID":"6790396c-25fc-40f8-8c0f-74bd6b73affd","Type":"ContainerStarted","Data":"d641e7ac9a2665ca41e8aadb2e3f0884c85fc17f56aa28f01225a8d13897415d"} Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.407343 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"b9d93bdac7862e66ca195c3c7192589432bef9280c5702f70ff8766f4ee586ab"} Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.408327 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-gdtsq" event={"ID":"cf8ee507-0a12-41eb-bef3-021e1bc74293","Type":"ContainerStarted","Data":"c8e5b9382e30a7aa8659c0ad9f79b2573132b3b2805db23e39723322174bac5f"} Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.456388 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" Jan 23 10:55:26 crc kubenswrapper[4689]: I0123 10:55:26.870561 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-85b45566f7-bqf9r"] Jan 23 10:55:27 crc kubenswrapper[4689]: I0123 10:55:27.416948 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" event={"ID":"37ac445c-20e3-495d-b0da-9d7134193acd","Type":"ContainerStarted","Data":"8e0be67d05bcc68f5fa09b908d1beebd04f459ec8d8bc170aedc561b3ff0008a"} Jan 23 10:55:27 crc kubenswrapper[4689]: I0123 10:55:27.417355 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" event={"ID":"37ac445c-20e3-495d-b0da-9d7134193acd","Type":"ContainerStarted","Data":"d5ab6cad7d9b216f8d12a2eed21aff9f2c3975db374962d1bc7807045bae6700"} Jan 23 10:55:27 crc kubenswrapper[4689]: W0123 10:55:27.660983 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9875c931_d946_41e1_8f23_89946abc0978.slice/crio-e32e10b98c10aef2da3640e465e1c3bc4b098fa82040272a1537523914761e5a WatchSource:0}: Error finding container e32e10b98c10aef2da3640e465e1c3bc4b098fa82040272a1537523914761e5a: Status 404 returned error can't find the container with id e32e10b98c10aef2da3640e465e1c3bc4b098fa82040272a1537523914761e5a Jan 23 10:55:28 crc kubenswrapper[4689]: I0123 10:55:28.424652 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"e32e10b98c10aef2da3640e465e1c3bc4b098fa82040272a1537523914761e5a"} Jan 23 10:55:28 crc kubenswrapper[4689]: I0123 10:55:28.989133 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:55:28 crc kubenswrapper[4689]: I0123 10:55:28.994546 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.003108 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023511 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023562 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023581 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2wrz\" (UniqueName: \"kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023635 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023656 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023740 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.023797 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124547 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc 
kubenswrapper[4689]: I0123 10:55:29.124603 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124629 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2wrz\" (UniqueName: \"kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124665 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124689 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124728 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.124761 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.125914 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.126173 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.126349 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.126670 
4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.130890 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.131318 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.141220 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2wrz\" (UniqueName: \"kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz\") pod \"console-6c6f9b55bb-77n8r\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.311484 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.543566 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.544868 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.548508 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.548871 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.549121 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-lcb5d" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.549305 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-8v3ktf6k1idcp" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.549340 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.556474 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.562412 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.722334 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736193 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f66fr\" (UniqueName: \"kubernetes.io/projected/c09648d1-cecf-420d-8b1c-226eb880a7a3-kube-api-access-f66fr\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736472 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c09648d1-cecf-420d-8b1c-226eb880a7a3-audit-log\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736523 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-metrics-server-audit-profiles\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736572 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-server-tls\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736612 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: 
\"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-client-certs\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736632 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.736671 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-client-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: W0123 10:55:29.744756 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6207352c_b7c8_44b5_ad56_6a3e7d365198.slice/crio-b0def27e2cef495659d08caba10b17deb3ff385e8860e8bae16c50d3a66010d9 WatchSource:0}: Error finding container b0def27e2cef495659d08caba10b17deb3ff385e8860e8bae16c50d3a66010d9: Status 404 returned error can't find the container with id b0def27e2cef495659d08caba10b17deb3ff385e8860e8bae16c50d3a66010d9 Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.838039 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-client-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839206 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f66fr\" (UniqueName: \"kubernetes.io/projected/c09648d1-cecf-420d-8b1c-226eb880a7a3-kube-api-access-f66fr\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839279 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c09648d1-cecf-420d-8b1c-226eb880a7a3-audit-log\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-metrics-server-audit-profiles\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839342 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: 
\"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-server-tls\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839380 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-client-certs\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839405 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.839933 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c09648d1-cecf-420d-8b1c-226eb880a7a3-audit-log\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.840280 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.840864 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c09648d1-cecf-420d-8b1c-226eb880a7a3-metrics-server-audit-profiles\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.845127 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-server-tls\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.845225 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-client-ca-bundle\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.845878 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/c09648d1-cecf-420d-8b1c-226eb880a7a3-secret-metrics-client-certs\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " 
pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.854707 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f66fr\" (UniqueName: \"kubernetes.io/projected/c09648d1-cecf-420d-8b1c-226eb880a7a3-kube-api-access-f66fr\") pod \"metrics-server-56b6c6f75d-w9wz2\" (UID: \"c09648d1-cecf-420d-8b1c-226eb880a7a3\") " pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.873077 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.969870 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.970647 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.972642 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.978028 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2"] Jan 23 10:55:29 crc kubenswrapper[4689]: I0123 10:55:29.979204 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.091824 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"] Jan 23 10:55:30 crc kubenswrapper[4689]: W0123 10:55:30.098779 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc09648d1_cecf_420d_8b1c_226eb880a7a3.slice/crio-cfc816bf4551bd77a68fdf9de653eb28017488630d482a622ed3ba23bfb4764a WatchSource:0}: Error finding container cfc816bf4551bd77a68fdf9de653eb28017488630d482a622ed3ba23bfb4764a: Status 404 returned error can't find the container with id cfc816bf4551bd77a68fdf9de653eb28017488630d482a622ed3ba23bfb4764a Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.155980 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/75df6af8-26fb-433c-99c0-da4b88e4796d-monitoring-plugin-cert\") pod \"monitoring-plugin-78f56cd898-f5fg2\" (UID: \"75df6af8-26fb-433c-99c0-da4b88e4796d\") " pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.257754 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/75df6af8-26fb-433c-99c0-da4b88e4796d-monitoring-plugin-cert\") pod \"monitoring-plugin-78f56cd898-f5fg2\" (UID: \"75df6af8-26fb-433c-99c0-da4b88e4796d\") " pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.268332 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/75df6af8-26fb-433c-99c0-da4b88e4796d-monitoring-plugin-cert\") pod \"monitoring-plugin-78f56cd898-f5fg2\" (UID: \"75df6af8-26fb-433c-99c0-da4b88e4796d\") " 
pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.287702 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.440553 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" event={"ID":"c09648d1-cecf-420d-8b1c-226eb880a7a3","Type":"ContainerStarted","Data":"cfc816bf4551bd77a68fdf9de653eb28017488630d482a622ed3ba23bfb4764a"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.448676 4689 generic.go:334] "Generic (PLEG): container finished" podID="cf8ee507-0a12-41eb-bef3-021e1bc74293" containerID="539b98e4921be5c5236c1deccab58d6a032a2b45a13ec8a2fb7a704b57d5e082" exitCode=0 Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.448741 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-gdtsq" event={"ID":"cf8ee507-0a12-41eb-bef3-021e1bc74293","Type":"ContainerDied","Data":"539b98e4921be5c5236c1deccab58d6a032a2b45a13ec8a2fb7a704b57d5e082"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.458859 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" event={"ID":"37ac445c-20e3-495d-b0da-9d7134193acd","Type":"ContainerStarted","Data":"dd9f121f739fc10b79f632ce8c0fc278c3cdd9be58c6321a4d4d6537c94cb907"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.466990 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.469096 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472350 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472507 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472538 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472551 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-njmlh" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472830 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472917 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.472970 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.473053 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.473987 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-5tonc98ho0e6q" Jan 23 10:55:30 crc kubenswrapper[4689]: 
I0123 10:55:30.474252 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.474472 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.474967 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" event={"ID":"6790396c-25fc-40f8-8c0f-74bd6b73affd","Type":"ContainerStarted","Data":"068cc5722fcf62a4ccae3c1b45dfe53b78032af4ed8c15cf4a1dd5d9f479b6f9"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.477039 4689 generic.go:334] "Generic (PLEG): container finished" podID="af850428-9296-48fc-a2f7-9215d947f943" containerID="6f01a6a8a22a11a635dc313099a60df112800b3d1688da4bbd6e1f383287ca77" exitCode=0 Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.477855 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerDied","Data":"6f01a6a8a22a11a635dc313099a60df112800b3d1688da4bbd6e1f383287ca77"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.484932 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6f9b55bb-77n8r" event={"ID":"6207352c-b7c8-44b5-ad56-6a3e7d365198","Type":"ContainerStarted","Data":"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.486026 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6f9b55bb-77n8r" event={"ID":"6207352c-b7c8-44b5-ad56-6a3e7d365198","Type":"ContainerStarted","Data":"b0def27e2cef495659d08caba10b17deb3ff385e8860e8bae16c50d3a66010d9"} Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.489319 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.489631 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.495344 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.508565 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6c6f9b55bb-77n8r" podStartSLOduration=2.508546243 podStartE2EDuration="2.508546243s" podCreationTimestamp="2026-01-23 10:55:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:55:30.506324858 +0000 UTC m=+395.131004717" watchObservedRunningTime="2026-01-23 10:55:30.508546243 +0000 UTC m=+395.133226102" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.531224 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-848p7" podStartSLOduration=2.872014826 podStartE2EDuration="6.531202877s" podCreationTimestamp="2026-01-23 10:55:24 +0000 UTC" firstStartedPulling="2026-01-23 10:55:25.614444625 +0000 UTC m=+390.239124494" lastFinishedPulling="2026-01-23 10:55:29.273632656 +0000 UTC m=+393.898312545" observedRunningTime="2026-01-23 10:55:30.529620128 +0000 UTC m=+395.154299997" 
watchObservedRunningTime="2026-01-23 10:55:30.531202877 +0000 UTC m=+395.155882736" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560568 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560625 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560650 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560671 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560779 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-config-out\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560827 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-web-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560848 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.560971 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms4zv\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-kube-api-access-ms4zv\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561071 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561104 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561175 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561256 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561340 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561361 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561378 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561405 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561463 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: 
\"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.561495 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.579787 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-5b7rt" podStartSLOduration=4.760337602 podStartE2EDuration="6.579769926s" podCreationTimestamp="2026-01-23 10:55:24 +0000 UTC" firstStartedPulling="2026-01-23 10:55:24.785476966 +0000 UTC m=+389.410156825" lastFinishedPulling="2026-01-23 10:55:26.60490929 +0000 UTC m=+391.229589149" observedRunningTime="2026-01-23 10:55:30.560488846 +0000 UTC m=+395.185168705" watchObservedRunningTime="2026-01-23 10:55:30.579769926 +0000 UTC m=+395.204449835" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662073 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662122 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662175 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662202 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662230 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662244 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " 
pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662260 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662275 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662295 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662406 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662433 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662451 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662465 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662484 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-config-out\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc 
kubenswrapper[4689]: I0123 10:55:30.662527 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-web-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662544 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms4zv\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-kube-api-access-ms4zv\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.662561 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.663396 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.664077 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.664273 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.664886 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.666070 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.669736 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/efa605e2-690e-48af-9218-a8826ead1e88-config-out\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.671784 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.674122 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/efa605e2-690e-48af-9218-a8826ead1e88-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.674585 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.674786 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-web-config\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.675048 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.675066 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.675246 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.675723 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.675778 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.679927 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.680036 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/efa605e2-690e-48af-9218-a8826ead1e88-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.686182 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms4zv\" (UniqueName: \"kubernetes.io/projected/efa605e2-690e-48af-9218-a8826ead1e88-kube-api-access-ms4zv\") pod \"prometheus-k8s-0\" (UID: \"efa605e2-690e-48af-9218-a8826ead1e88\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.750225 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2"] Jan 23 10:55:30 crc kubenswrapper[4689]: I0123 10:55:30.797952 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:55:31 crc kubenswrapper[4689]: I0123 10:55:31.205206 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 23 10:55:31 crc kubenswrapper[4689]: I0123 10:55:31.491727 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" event={"ID":"75df6af8-26fb-433c-99c0-da4b88e4796d","Type":"ContainerStarted","Data":"895f90380f01f044167ba32f7ec73160edb944db52dbd45119a0c01889fb688e"} Jan 23 10:55:31 crc kubenswrapper[4689]: I0123 10:55:31.495054 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-gdtsq" event={"ID":"cf8ee507-0a12-41eb-bef3-021e1bc74293","Type":"ContainerStarted","Data":"86d12da0a834d3cc96123a1e07c1bcb220ee584cc07adc647fbc1f9dfa71fa38"} Jan 23 10:55:31 crc kubenswrapper[4689]: I0123 10:55:31.495382 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-gdtsq" event={"ID":"cf8ee507-0a12-41eb-bef3-021e1bc74293","Type":"ContainerStarted","Data":"e5a20465018e50cc9cbfab05804def487097bd20651da2eb2ad25b1af20515de"} Jan 23 10:55:31 crc kubenswrapper[4689]: I0123 10:55:31.516208 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-gdtsq" podStartSLOduration=3.718587998 podStartE2EDuration="7.516188321s" podCreationTimestamp="2026-01-23 10:55:24 +0000 UTC" firstStartedPulling="2026-01-23 10:55:25.475968022 +0000 UTC m=+390.100647881" lastFinishedPulling="2026-01-23 10:55:29.273568344 +0000 UTC m=+393.898248204" observedRunningTime="2026-01-23 10:55:31.512992912 +0000 UTC m=+396.137672781" watchObservedRunningTime="2026-01-23 10:55:31.516188321 +0000 UTC m=+396.140868180" Jan 23 10:55:32 crc kubenswrapper[4689]: I0123 10:55:32.501277 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"7ae11724070fc28647748dd5d7c0fc2f581b0a736f75f32897f6e9bff21696b5"} Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.311698 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf 
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.311698 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.311753 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.311794 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf"
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.312348 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.312390 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8" gracePeriod=600
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.510563 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8" exitCode=0
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.510665 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8"}
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.510728 4689 scope.go:117] "RemoveContainer" containerID="20a28645a75bcfe2156b98220e783da1b6e37dc4b0f0c709d4c610544e2774f4"
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.512823 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" event={"ID":"c09648d1-cecf-420d-8b1c-226eb880a7a3","Type":"ContainerStarted","Data":"38aa0cd5f5288400c6515cf6a9235754c9a90fa712fa7bcc350799d57840292e"}
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.514906 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"5a336ed4dbd7300afc7d0c1a627b2a24bbbfd0508c3f814c7a8d7d18384db74d"}
Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.514944 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"8e98814cf18a34ffb28b7d3d88ed768fd3c4e7e56a922d87f264141461416f08"}
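[Annotation] The sequence above is the standard liveness-restart path: the HTTP probe to 127.0.0.1:8798/health is refused, the kubelet marks the probe unhealthy, kills the container with the pod's grace period (here 600s), and the PLEG then reports ContainerDied followed later by a fresh ContainerStarted. Only the container restarts; the pod and its sandbox stay. A sketch of a probe definition consistent with the logged endpoint; the period and threshold are assumptions, though the later failures at 10:57:33, 10:58:03 and 10:58:33 are consistent with a 30s period and a threshold of 3:

package probes

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Host, path and port match the probe output in the log; the numeric
// knobs are assumptions, not read from the machine-config-daemon manifest.
var mcdLiveness = &corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{
			Host: "127.0.0.1",
			Path: "/health",
			Port: intstr.FromInt(8798),
		},
	},
	PeriodSeconds:    30, // assumed
	FailureThreshold: 3,  // assumed
}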
podID="efa605e2-690e-48af-9218-a8826ead1e88" containerID="c7ec39f5d5a944797a5ed77330b5c25d64e19bb253d219073d2742cc28951765" exitCode=0 Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.517093 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerDied","Data":"c7ec39f5d5a944797a5ed77330b5c25d64e19bb253d219073d2742cc28951765"} Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.521233 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 10:55:33 crc kubenswrapper[4689]: I0123 10:55:33.526943 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podStartSLOduration=2.254131337 podStartE2EDuration="4.526902494s" podCreationTimestamp="2026-01-23 10:55:29 +0000 UTC" firstStartedPulling="2026-01-23 10:55:30.102181306 +0000 UTC m=+394.726861165" lastFinishedPulling="2026-01-23 10:55:32.374952473 +0000 UTC m=+396.999632322" observedRunningTime="2026-01-23 10:55:33.52633742 +0000 UTC m=+398.151017289" watchObservedRunningTime="2026-01-23 10:55:33.526902494 +0000 UTC m=+398.151582373" Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.789669 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" event={"ID":"75df6af8-26fb-433c-99c0-da4b88e4796d","Type":"ContainerStarted","Data":"60ab3eb9b8f998e6fbcd5bbd4ee6897828ec8529f499175c33103d085c543621"} Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.793212 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.797617 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb"} Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.802615 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"12fc90f693845b56bb325a5a79917b3c7d54d669d2b7eedc31fcb442be03b2dc"} Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.804799 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"93544f1dbe276876101635b6a63be08c83925431704449b53dae9fa395a5aaf6"} Jan 23 10:55:34 crc kubenswrapper[4689]: I0123 10:55:34.836594 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podStartSLOduration=3.066698988 podStartE2EDuration="5.836574202s" podCreationTimestamp="2026-01-23 10:55:29 +0000 UTC" firstStartedPulling="2026-01-23 10:55:30.765646135 +0000 UTC m=+395.390325994" lastFinishedPulling="2026-01-23 10:55:33.535521349 +0000 UTC m=+398.160201208" observedRunningTime="2026-01-23 10:55:34.81641021 +0000 UTC m=+399.441090079" watchObservedRunningTime="2026-01-23 10:55:34.836574202 +0000 UTC m=+399.461254071" Jan 23 10:55:35 crc kubenswrapper[4689]: I0123 10:55:35.137989 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 10:55:35 crc kubenswrapper[4689]: I0123 10:55:35.816666 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"bd9304cfaae3e99da2bf792c96cf0ef9ea00d5631950a8d92242f0af1b0073d1"} Jan 23 10:55:38 crc kubenswrapper[4689]: I0123 10:55:38.843124 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"ce3fc40678bee08267e5cbe5ca19a70c80255b5cccfa6e676cfb754ee72c3e6d"} Jan 23 10:55:39 crc kubenswrapper[4689]: I0123 10:55:39.312391 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:39 crc kubenswrapper[4689]: I0123 10:55:39.312475 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:39 crc kubenswrapper[4689]: I0123 10:55:39.320087 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:39 crc kubenswrapper[4689]: I0123 10:55:39.857228 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:55:39 crc kubenswrapper[4689]: I0123 10:55:39.934821 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"] Jan 23 10:55:41 crc kubenswrapper[4689]: I0123 10:55:41.867882 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"257db0aba43f791ec6fd6cb97f7c77cba060656859a077a9381e8ae99984ab85"} Jan 23 10:55:44 crc kubenswrapper[4689]: I0123 10:55:44.897339 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"62ae8af81fcafcb329bf2ea4d9cef2a2106444cbaf10db6b008288a0f4ca0ccd"} Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.911018 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"af850428-9296-48fc-a2f7-9215d947f943","Type":"ContainerStarted","Data":"5fddee36f60a4e2119c07b98eb210689c2955b56a62a8aca55cae93cc55cb6c3"} Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.920636 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"e72917299c005a6b89c3171333eeb6038210a1b8dee896567859e8b7c8018627"} Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.920686 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"673b11d682f97eff811415c7719103017512e358664f4121b2c3bb06a540387d"} Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.920700 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" event={"ID":"9875c931-d946-41e1-8f23-89946abc0978","Type":"ContainerStarted","Data":"417529e96da531327c73fab3825d9582b97bce3c92d1754f9b62ee47797fd340"} Jan 23 
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.921814 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r"
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.924754 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"f38a81f35960ccc60cc6303d9d30f7c3e4e016ad47398357ae5d6253214ff7db"}
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.924781 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"e06c25837fc4bcc783fbd292e6b925a0480f067aee39dde12a4177f23a963ca3"}
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.924794 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18"}
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.934078 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r"
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.983939 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=13.385091814 podStartE2EDuration="20.983921648s" podCreationTimestamp="2026-01-23 10:55:25 +0000 UTC" firstStartedPulling="2026-01-23 10:55:25.963125573 +0000 UTC m=+390.587805432" lastFinishedPulling="2026-01-23 10:55:33.561955407 +0000 UTC m=+398.186635266" observedRunningTime="2026-01-23 10:55:45.950607928 +0000 UTC m=+410.575287797" watchObservedRunningTime="2026-01-23 10:55:45.983921648 +0000 UTC m=+410.608601507"
Jan 23 10:55:45 crc kubenswrapper[4689]: I0123 10:55:45.986051 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podStartSLOduration=3.62242175 podStartE2EDuration="19.98604266s" podCreationTimestamp="2026-01-23 10:55:26 +0000 UTC" firstStartedPulling="2026-01-23 10:55:27.664875671 +0000 UTC m=+392.289555530" lastFinishedPulling="2026-01-23 10:55:44.028496581 +0000 UTC m=+408.653176440" observedRunningTime="2026-01-23 10:55:45.978727558 +0000 UTC m=+410.603407417" watchObservedRunningTime="2026-01-23 10:55:45.98604266 +0000 UTC m=+410.610722539"
Jan 23 10:55:46 crc kubenswrapper[4689]: I0123 10:55:46.941348 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"781d67e840a69b9880a4c24c519ff16815c0658ed779223c8c8cf65b3da37780"}
Jan 23 10:55:46 crc kubenswrapper[4689]: I0123 10:55:46.941399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"aa2fbad511109d034f90bd65874d20d000252aa19d28de5a679d2529160ebec4"}
Jan 23 10:55:46 crc kubenswrapper[4689]: I0123 10:55:46.941418 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"24ace03a3676520954ec244d4dae70c4f1bbd278f6d5084bbdaa7eff7df28274"}
Jan 23 10:55:46 crc kubenswrapper[4689]: I0123 10:55:46.985621 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=6.475522159 podStartE2EDuration="16.985597807s" podCreationTimestamp="2026-01-23 10:55:30 +0000 UTC" firstStartedPulling="2026-01-23 10:55:33.51992091 +0000 UTC m=+398.144600779" lastFinishedPulling="2026-01-23 10:55:44.029996568 +0000 UTC m=+408.654676427" observedRunningTime="2026-01-23 10:55:46.980059699 +0000 UTC m=+411.604739558" watchObservedRunningTime="2026-01-23 10:55:46.985597807 +0000 UTC m=+411.610277676"
Jan 23 10:55:49 crc kubenswrapper[4689]: I0123 10:55:49.873400 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"
Jan 23 10:55:49 crc kubenswrapper[4689]: I0123 10:55:49.874037 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"
Jan 23 10:55:50 crc kubenswrapper[4689]: I0123 10:55:50.798463 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0"
Jan 23 10:56:04 crc kubenswrapper[4689]: I0123 10:56:04.987768 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-kz8dz" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerName="console" containerID="cri-o://6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e" gracePeriod=15
Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.457104 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kz8dz_484d0401-5634-42e8-b09e-8c7eb65aa84c/console/0.log"
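[Annotation] The "Killing container with a grace period" line above tears down the old console pod with gracePeriod=15, versus 600 for the machine-config-daemon earlier: the kubelet sends SIGTERM, waits up to the grace period, then SIGKILLs. The number comes from the pod spec's terminationGracePeriodSeconds. A minimal sketch, assuming values that mirror this log (the image name is a placeholder):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	grace := int64(15) // console pods in this log are killed with gracePeriod=15
	spec := corev1.PodSpec{
		TerminationGracePeriodSeconds: &grace,
		Containers: []corev1.Container{
			{Name: "console", Image: "example.invalid/console:latest"}, // placeholder image
		},
	}
	fmt.Println("grace period:", *spec.TerminationGracePeriodSeconds, "s")
}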
Need to start a new one" pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575375 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575444 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575487 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575533 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4qkr\" (UniqueName: \"kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575889 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.575943 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.576824 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.576854 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.576975 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca\") pod \"484d0401-5634-42e8-b09e-8c7eb65aa84c\" (UID: \"484d0401-5634-42e8-b09e-8c7eb65aa84c\") " Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.576965 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config" (OuterVolumeSpecName: "console-config") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.577483 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.577507 4689 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.577519 4689 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.577763 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca" (OuterVolumeSpecName: "service-ca") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.584572 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr" (OuterVolumeSpecName: "kube-api-access-m4qkr") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "kube-api-access-m4qkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.586630 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.600655 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "484d0401-5634-42e8-b09e-8c7eb65aa84c" (UID: "484d0401-5634-42e8-b09e-8c7eb65aa84c"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.678498 4689 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.678801 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/484d0401-5634-42e8-b09e-8c7eb65aa84c-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.678810 4689 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/484d0401-5634-42e8-b09e-8c7eb65aa84c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:05 crc kubenswrapper[4689]: I0123 10:56:05.678818 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4qkr\" (UniqueName: \"kubernetes.io/projected/484d0401-5634-42e8-b09e-8c7eb65aa84c-kube-api-access-m4qkr\") on node \"crc\" DevicePath \"\"" Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.089713 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kz8dz_484d0401-5634-42e8-b09e-8c7eb65aa84c/console/0.log" Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.090037 4689 generic.go:334] "Generic (PLEG): container finished" podID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerID="6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e" exitCode=2 Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.090075 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kz8dz" event={"ID":"484d0401-5634-42e8-b09e-8c7eb65aa84c","Type":"ContainerDied","Data":"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e"} Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.090104 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kz8dz" event={"ID":"484d0401-5634-42e8-b09e-8c7eb65aa84c","Type":"ContainerDied","Data":"e43e5995d23af20e7a17ad62ad0cceda853081effa76c143ba60abd9706ee1b9"} Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.090114 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-kz8dz" Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.090124 4689 scope.go:117] "RemoveContainer" containerID="6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e" Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.117982 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"] Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.127767 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-kz8dz"] Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.132277 4689 scope.go:117] "RemoveContainer" containerID="6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e" Jan 23 10:56:06 crc kubenswrapper[4689]: E0123 10:56:06.132876 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e\": container with ID starting with 6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e not found: ID does not exist" containerID="6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e" Jan 23 10:56:06 crc kubenswrapper[4689]: I0123 10:56:06.132929 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e"} err="failed to get container status \"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e\": rpc error: code = NotFound desc = could not find container \"6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e\": container with ID starting with 6facd64d7c19bcaf25609b0785f172b371395c444ca6d6b52d0969700dcfce7e not found: ID does not exist" Jan 23 10:56:07 crc kubenswrapper[4689]: I0123 10:56:07.649643 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" path="/var/lib/kubelet/pods/484d0401-5634-42e8-b09e-8c7eb65aa84c/volumes" Jan 23 10:56:09 crc kubenswrapper[4689]: I0123 10:56:09.878316 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:56:09 crc kubenswrapper[4689]: I0123 10:56:09.883420 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 10:56:30 crc kubenswrapper[4689]: I0123 10:56:30.798651 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:56:30 crc kubenswrapper[4689]: I0123 10:56:30.840111 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:56:31 crc kubenswrapper[4689]: I0123 10:56:31.309787 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 10:57:33 crc kubenswrapper[4689]: I0123 10:57:33.310641 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:57:33 crc kubenswrapper[4689]: I0123 10:57:33.311411 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" 
podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.006502 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 10:57:37 crc kubenswrapper[4689]: E0123 10:57:37.009822 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerName="console" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.010056 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerName="console" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.010416 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="484d0401-5634-42e8-b09e-8c7eb65aa84c" containerName="console" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.011381 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.034347 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105288 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105601 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105634 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqlzd\" (UniqueName: \"kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105659 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105691 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105711 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.105831 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206801 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206863 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206883 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206909 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqlzd\" (UniqueName: \"kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206931 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206959 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.206976 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.208101 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.208160 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.209135 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.209340 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.213054 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.220614 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.237672 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqlzd\" (UniqueName: \"kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd\") pod \"console-6cb59b46c-trq59\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.339194 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:37 crc kubenswrapper[4689]: I0123 10:57:37.823796 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 10:57:37 crc kubenswrapper[4689]: W0123 10:57:37.830604 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75f5477a_8c1c_4d7d_b6b5_373a337d3642.slice/crio-da0306cc0f0f259088ef981636b4faf2dd6638d9382065a03b42da4b95f1450b WatchSource:0}: Error finding container da0306cc0f0f259088ef981636b4faf2dd6638d9382065a03b42da4b95f1450b: Status 404 returned error can't find the container with id da0306cc0f0f259088ef981636b4faf2dd6638d9382065a03b42da4b95f1450b Jan 23 10:57:38 crc kubenswrapper[4689]: I0123 10:57:38.742773 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6cb59b46c-trq59" event={"ID":"75f5477a-8c1c-4d7d-b6b5-373a337d3642","Type":"ContainerStarted","Data":"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d"} Jan 23 10:57:38 crc kubenswrapper[4689]: I0123 10:57:38.743243 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6cb59b46c-trq59" event={"ID":"75f5477a-8c1c-4d7d-b6b5-373a337d3642","Type":"ContainerStarted","Data":"da0306cc0f0f259088ef981636b4faf2dd6638d9382065a03b42da4b95f1450b"} Jan 23 10:57:38 crc kubenswrapper[4689]: I0123 10:57:38.763721 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6cb59b46c-trq59" podStartSLOduration=2.7636607399999997 podStartE2EDuration="2.76366074s" podCreationTimestamp="2026-01-23 10:57:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 10:57:38.763320712 +0000 UTC m=+523.388000631" watchObservedRunningTime="2026-01-23 10:57:38.76366074 +0000 UTC m=+523.388340679" Jan 23 10:57:47 crc kubenswrapper[4689]: I0123 10:57:47.340110 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:47 crc kubenswrapper[4689]: I0123 10:57:47.340589 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:47 crc kubenswrapper[4689]: I0123 10:57:47.348404 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:47 crc kubenswrapper[4689]: I0123 10:57:47.814288 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 10:57:47 crc kubenswrapper[4689]: I0123 10:57:47.892663 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:58:03 crc kubenswrapper[4689]: I0123 10:58:03.311076 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:58:03 crc kubenswrapper[4689]: I0123 10:58:03.311806 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:58:12 crc kubenswrapper[4689]: I0123 10:58:12.939593 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-6c6f9b55bb-77n8r" podUID="6207352c-b7c8-44b5-ad56-6a3e7d365198" containerName="console" containerID="cri-o://3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043" gracePeriod=15 Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.414345 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-6c6f9b55bb-77n8r_6207352c-b7c8-44b5-ad56-6a3e7d365198/console/0.log" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.414457 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.612054 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.612216 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.612240 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2wrz\" (UniqueName: \"kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.612938 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config" (OuterVolumeSpecName: "console-config") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613033 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613204 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613230 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613246 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613735 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert\") pod \"6207352c-b7c8-44b5-ad56-6a3e7d365198\" (UID: \"6207352c-b7c8-44b5-ad56-6a3e7d365198\") " Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613593 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca" (OuterVolumeSpecName: "service-ca") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.613674 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.614437 4689 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.614456 4689 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.614868 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.614883 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6207352c-b7c8-44b5-ad56-6a3e7d365198-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.617684 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz" (OuterVolumeSpecName: "kube-api-access-g2wrz") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "kube-api-access-g2wrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.617701 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.623324 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "6207352c-b7c8-44b5-ad56-6a3e7d365198" (UID: "6207352c-b7c8-44b5-ad56-6a3e7d365198"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.715894 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2wrz\" (UniqueName: \"kubernetes.io/projected/6207352c-b7c8-44b5-ad56-6a3e7d365198-kube-api-access-g2wrz\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.715933 4689 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:13 crc kubenswrapper[4689]: I0123 10:58:13.715944 4689 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6207352c-b7c8-44b5-ad56-6a3e7d365198-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013134 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-6c6f9b55bb-77n8r_6207352c-b7c8-44b5-ad56-6a3e7d365198/console/0.log" Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013212 4689 generic.go:334] "Generic (PLEG): container finished" podID="6207352c-b7c8-44b5-ad56-6a3e7d365198" containerID="3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043" exitCode=2 Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013243 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6f9b55bb-77n8r" event={"ID":"6207352c-b7c8-44b5-ad56-6a3e7d365198","Type":"ContainerDied","Data":"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043"} Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013276 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6f9b55bb-77n8r" event={"ID":"6207352c-b7c8-44b5-ad56-6a3e7d365198","Type":"ContainerDied","Data":"b0def27e2cef495659d08caba10b17deb3ff385e8860e8bae16c50d3a66010d9"} Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013293 4689 scope.go:117] "RemoveContainer" containerID="3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043" Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.013298 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6c6f9b55bb-77n8r" Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.034734 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.043125 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-6c6f9b55bb-77n8r"] Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.045841 4689 scope.go:117] "RemoveContainer" containerID="3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043" Jan 23 10:58:14 crc kubenswrapper[4689]: E0123 10:58:14.046307 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043\": container with ID starting with 3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043 not found: ID does not exist" containerID="3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043" Jan 23 10:58:14 crc kubenswrapper[4689]: I0123 10:58:14.046348 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043"} err="failed to get container status \"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043\": rpc error: code = NotFound desc = could not find container \"3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043\": container with ID starting with 3e80346ec664113950b7866161d226a820c647af57639577fc62ab1efae5b043 not found: ID does not exist" Jan 23 10:58:15 crc kubenswrapper[4689]: I0123 10:58:15.652631 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6207352c-b7c8-44b5-ad56-6a3e7d365198" path="/var/lib/kubelet/pods/6207352c-b7c8-44b5-ad56-6a3e7d365198/volumes" Jan 23 10:58:33 crc kubenswrapper[4689]: I0123 10:58:33.311451 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 10:58:33 crc kubenswrapper[4689]: I0123 10:58:33.312231 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 10:58:33 crc kubenswrapper[4689]: I0123 10:58:33.312299 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 10:58:33 crc kubenswrapper[4689]: I0123 10:58:33.313099 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 10:58:33 crc kubenswrapper[4689]: I0123 10:58:33.313233 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" 
containerName="machine-config-daemon" containerID="cri-o://a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb" gracePeriod=600 Jan 23 10:58:34 crc kubenswrapper[4689]: I0123 10:58:34.173103 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb" exitCode=0 Jan 23 10:58:34 crc kubenswrapper[4689]: I0123 10:58:34.173174 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb"} Jan 23 10:58:34 crc kubenswrapper[4689]: I0123 10:58:34.173585 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf"} Jan 23 10:58:34 crc kubenswrapper[4689]: I0123 10:58:34.173615 4689 scope.go:117] "RemoveContainer" containerID="e315c320f0ad2702681ecbe44e73ceed0977d5c34d8230cf6b96dd29e24d95c8" Jan 23 10:58:55 crc kubenswrapper[4689]: I0123 10:58:55.928605 4689 scope.go:117] "RemoveContainer" containerID="dbc9eea6d0e6506babfc6410f3fe15a361763061b66eb3aa2332a95aa6b70333" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.165749 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28"] Jan 23 11:00:00 crc kubenswrapper[4689]: E0123 11:00:00.167710 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6207352c-b7c8-44b5-ad56-6a3e7d365198" containerName="console" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.167735 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6207352c-b7c8-44b5-ad56-6a3e7d365198" containerName="console" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.167896 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6207352c-b7c8-44b5-ad56-6a3e7d365198" containerName="console" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.168427 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.170113 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.171349 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.172962 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28"] Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.268298 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.268345 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.268376 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb7tw\" (UniqueName: \"kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.369955 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.370024 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.370081 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb7tw\" (UniqueName: \"kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.372929 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume\") pod 
\"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.387294 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.390364 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb7tw\" (UniqueName: \"kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw\") pod \"collect-profiles-29486100-zgc28\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.482743 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.674941 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28"] Jan 23 11:00:00 crc kubenswrapper[4689]: I0123 11:00:00.821403 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" event={"ID":"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b","Type":"ContainerStarted","Data":"42d41b5c3efa679fc9ff498e4a974ab2fb9203e51835d58381cd8116caa6b0fd"} Jan 23 11:00:01 crc kubenswrapper[4689]: I0123 11:00:01.827967 4689 generic.go:334] "Generic (PLEG): container finished" podID="0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" containerID="29da1e0721d15f43f6dfc43aa8e1186fee507ee59229a0ac5c8ca295f8383100" exitCode=0 Jan 23 11:00:01 crc kubenswrapper[4689]: I0123 11:00:01.828051 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" event={"ID":"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b","Type":"ContainerDied","Data":"29da1e0721d15f43f6dfc43aa8e1186fee507ee59229a0ac5c8ca295f8383100"} Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.064783 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.108209 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qb7tw\" (UniqueName: \"kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw\") pod \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.108293 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume\") pod \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.108384 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume\") pod \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\" (UID: \"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b\") " Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.109350 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume" (OuterVolumeSpecName: "config-volume") pod "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" (UID: "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.114232 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw" (OuterVolumeSpecName: "kube-api-access-qb7tw") pod "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" (UID: "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b"). InnerVolumeSpecName "kube-api-access-qb7tw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.115122 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" (UID: "0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.210332 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qb7tw\" (UniqueName: \"kubernetes.io/projected/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-kube-api-access-qb7tw\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.210394 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.210409 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.840028 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" event={"ID":"0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b","Type":"ContainerDied","Data":"42d41b5c3efa679fc9ff498e4a974ab2fb9203e51835d58381cd8116caa6b0fd"} Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.840067 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42d41b5c3efa679fc9ff498e4a974ab2fb9203e51835d58381cd8116caa6b0fd" Jan 23 11:00:03 crc kubenswrapper[4689]: I0123 11:00:03.840108 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.589947 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7"] Jan 23 11:00:04 crc kubenswrapper[4689]: E0123 11:00:04.590753 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" containerName="collect-profiles" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.590773 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" containerName="collect-profiles" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.591004 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" containerName="collect-profiles" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.592589 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.594578 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.599434 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7"] Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.630179 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.630363 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.630434 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fcj6\" (UniqueName: \"kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.731963 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fcj6\" (UniqueName: \"kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.732038 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.732198 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.732636 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.732668 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.751439 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fcj6\" (UniqueName: \"kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:04 crc kubenswrapper[4689]: I0123 11:00:04.908571 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:05 crc kubenswrapper[4689]: I0123 11:00:05.136562 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7"] Jan 23 11:00:05 crc kubenswrapper[4689]: I0123 11:00:05.851983 4689 generic.go:334] "Generic (PLEG): container finished" podID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerID="3c9af00016d323f058c21d8c8cde8fe3b37f58f7bfef5d80e3b82d4e2acbf003" exitCode=0 Jan 23 11:00:05 crc kubenswrapper[4689]: I0123 11:00:05.852038 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" event={"ID":"b708d6b3-99ec-4a6e-8942-86cf6bf46362","Type":"ContainerDied","Data":"3c9af00016d323f058c21d8c8cde8fe3b37f58f7bfef5d80e3b82d4e2acbf003"} Jan 23 11:00:05 crc kubenswrapper[4689]: I0123 11:00:05.852287 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" event={"ID":"b708d6b3-99ec-4a6e-8942-86cf6bf46362","Type":"ContainerStarted","Data":"0a5a0714dcaf9ff90bdeaa66a79bc264487930e873d5fb6fb054d01fb751989a"} Jan 23 11:00:07 crc kubenswrapper[4689]: I0123 11:00:07.865118 4689 generic.go:334] "Generic (PLEG): container finished" podID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerID="a8288701833af0dc2f90684c6f0c02470a07262971cfb0dfb0cbc018a05ecb41" exitCode=0 Jan 23 11:00:07 crc kubenswrapper[4689]: I0123 11:00:07.865195 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" event={"ID":"b708d6b3-99ec-4a6e-8942-86cf6bf46362","Type":"ContainerDied","Data":"a8288701833af0dc2f90684c6f0c02470a07262971cfb0dfb0cbc018a05ecb41"} Jan 23 11:00:08 crc kubenswrapper[4689]: I0123 11:00:08.873027 4689 generic.go:334] "Generic (PLEG): container finished" podID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerID="d2cef791a0c64a7c772d68e8d767911ba4e29c53c1eb2c1587d649589ecd1cda" exitCode=0 Jan 23 11:00:08 crc kubenswrapper[4689]: I0123 
11:00:08.873069 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" event={"ID":"b708d6b3-99ec-4a6e-8942-86cf6bf46362","Type":"ContainerDied","Data":"d2cef791a0c64a7c772d68e8d767911ba4e29c53c1eb2c1587d649589ecd1cda"} Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.123876 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.211803 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fcj6\" (UniqueName: \"kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6\") pod \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.211931 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle\") pod \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.211984 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util\") pod \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\" (UID: \"b708d6b3-99ec-4a6e-8942-86cf6bf46362\") " Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.214725 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle" (OuterVolumeSpecName: "bundle") pod "b708d6b3-99ec-4a6e-8942-86cf6bf46362" (UID: "b708d6b3-99ec-4a6e-8942-86cf6bf46362"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.218331 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6" (OuterVolumeSpecName: "kube-api-access-2fcj6") pod "b708d6b3-99ec-4a6e-8942-86cf6bf46362" (UID: "b708d6b3-99ec-4a6e-8942-86cf6bf46362"). InnerVolumeSpecName "kube-api-access-2fcj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.227210 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util" (OuterVolumeSpecName: "util") pod "b708d6b3-99ec-4a6e-8942-86cf6bf46362" (UID: "b708d6b3-99ec-4a6e-8942-86cf6bf46362"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.313244 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.313271 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b708d6b3-99ec-4a6e-8942-86cf6bf46362-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.313281 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fcj6\" (UniqueName: \"kubernetes.io/projected/b708d6b3-99ec-4a6e-8942-86cf6bf46362-kube-api-access-2fcj6\") on node \"crc\" DevicePath \"\"" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.898694 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" event={"ID":"b708d6b3-99ec-4a6e-8942-86cf6bf46362","Type":"ContainerDied","Data":"0a5a0714dcaf9ff90bdeaa66a79bc264487930e873d5fb6fb054d01fb751989a"} Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.899056 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a5a0714dcaf9ff90bdeaa66a79bc264487930e873d5fb6fb054d01fb751989a" Jan 23 11:00:10 crc kubenswrapper[4689]: I0123 11:00:10.898823 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.207396 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jpm9c"] Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208090 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-controller" containerID="cri-o://0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208193 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="nbdb" containerID="cri-o://b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208244 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="northd" containerID="cri-o://0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208307 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-acl-logging" containerID="cri-o://6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208363 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-node" 
containerID="cri-o://507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208361 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.208336 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="sbdb" containerID="cri-o://2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.244376 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" containerID="cri-o://729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b" gracePeriod=30 Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.543789 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 is running failed: container process not found" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.543871 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 is running failed: container process not found" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544110 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 is running failed: container process not found" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544209 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 is running failed: container process not found" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544494 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 is running failed: container process not found" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544520 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="nbdb" Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544582 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 is running failed: container process not found" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Jan 23 11:00:16 crc kubenswrapper[4689]: E0123 11:00:16.544598 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="sbdb" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.945701 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovnkube-controller/2.log" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.947717 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-acl-logging/0.log" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948241 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-controller/0.log" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948591 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b" exitCode=0 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948621 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4" exitCode=0 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948631 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3" exitCode=0 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948638 4689 
generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca" exitCode=0 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948644 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5" exitCode=143 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948652 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74" exitCode=143 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948689 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948715 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948725 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948735 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948744 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948753 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.948768 4689 scope.go:117] "RemoveContainer" containerID="6351034cc8f06ccb98deb86fd8165005006ac87ef826df90dc9ea45c6dddeda0" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.952780 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-xxklh_d5f32f36-d66c-4202-ac54-e81c6d978146/kube-multus/1.log" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.953260 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-xxklh_d5f32f36-d66c-4202-ac54-e81c6d978146/kube-multus/0.log" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.953281 4689 generic.go:334] "Generic (PLEG): container finished" podID="d5f32f36-d66c-4202-ac54-e81c6d978146" containerID="d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128" exitCode=2 Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.953299 4689 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerDied","Data":"d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128"} Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.953770 4689 scope.go:117] "RemoveContainer" containerID="d3d9560b72386cc0d0e85ef107cbd87d3526654d4f7d257611190f530bf11128" Jan 23 11:00:16 crc kubenswrapper[4689]: I0123 11:00:16.976732 4689 scope.go:117] "RemoveContainer" containerID="66ff2299b96f8c1abb706fb61658e9419cdd24c6afc69cdc23f0c8e38cce8f9f" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.533717 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-acl-logging/0.log" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.534378 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-controller/0.log" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.534807 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598263 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fjpqb"] Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598540 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="northd" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598562 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="northd" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598573 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598581 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598590 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="extract" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598597 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="extract" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598607 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kubecfg-setup" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598616 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kubecfg-setup" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598625 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598633 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598651 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" 
containerName="ovn-acl-logging" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598660 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-acl-logging" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598673 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="pull" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598680 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="pull" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598690 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="sbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598697 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="sbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598705 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598714 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598729 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="util" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598737 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="util" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598749 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="nbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598757 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="nbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598766 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-node" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598774 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-node" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598783 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598791 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.598804 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598811 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598939 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 
11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598953 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="sbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598961 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b708d6b3-99ec-4a6e-8942-86cf6bf46362" containerName="extract" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598971 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="nbdb" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598983 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.598992 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599005 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="kube-rbac-proxy-node" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599014 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599024 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-acl-logging" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599038 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="northd" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599045 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovn-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: E0123 11:00:17.599216 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599228 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.599354 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5206d70b-3d3b-404c-b969-713242a23d38" containerName="ovnkube-controller" Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.601685 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621668 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621729 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621759 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkjb8\" (UniqueName: \"kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621825 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621849 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621883 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621913 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621948 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.621979 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622003 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622025 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622047 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622076 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622109 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622138 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622216 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622264 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622292 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622312 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622334 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log\") pod \"5206d70b-3d3b-404c-b969-713242a23d38\" (UID: \"5206d70b-3d3b-404c-b969-713242a23d38\") "
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622500 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-config\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622535 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-systemd-units\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622555 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-node-log\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622578 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-log-socket\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622612 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-etc-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622640 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-slash\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622665 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-netns\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622698 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-netd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622718 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovn-node-metrics-cert\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622754 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-systemd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622781 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-var-lib-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622802 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622823 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-bin\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622847 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622870 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622898 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9nzk\" (UniqueName: \"kubernetes.io/projected/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-kube-api-access-b9nzk\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622926 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-kubelet\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622945 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-ovn\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622964 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-env-overrides\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.622989 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-script-lib\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.623066 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.623531 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624456 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624506 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624504 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket" (OuterVolumeSpecName: "log-socket") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624504 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624538 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624555 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624570 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash" (OuterVolumeSpecName: "host-slash") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624576 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624604 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624622 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624649 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log" (OuterVolumeSpecName: "node-log") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624670 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624673 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624909 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.624919 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.635683 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.635859 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8" (OuterVolumeSpecName: "kube-api-access-qkjb8") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "kube-api-access-qkjb8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.647575 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "5206d70b-3d3b-404c-b969-713242a23d38" (UID: "5206d70b-3d3b-404c-b969-713242a23d38"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.724873 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-kubelet\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725012 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-kubelet\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725287 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-ovn\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725250 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-ovn\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725418 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-env-overrides\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725469 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-script-lib\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725538 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-config\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725596 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-systemd-units\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725770 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-node-log\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725819 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-log-socket\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725896 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-etc-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725943 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-node-log\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725955 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-slash\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725988 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-slash\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726012 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-env-overrides\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726042 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-netns\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726054 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-etc-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.725679 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-systemd-units\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726045 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-log-socket\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726093 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-netns\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726201 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-netd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726234 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovn-node-metrics-cert\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726305 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-netd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726330 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-systemd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726383 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-config\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726417 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-systemd\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726423 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-var-lib-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726441 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726458 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-bin\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726457 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovnkube-script-lib\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726481 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726502 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-run-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726505 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-run-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726466 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-var-lib-openvswitch\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726528 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-cni-bin\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726539 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726533 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726573 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9nzk\" (UniqueName: \"kubernetes.io/projected/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-kube-api-access-b9nzk\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726638 4689 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726649 4689 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-netns\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726658 4689 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-netd\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726667 4689 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726676 4689 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726686 4689 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726694 4689 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726703 4689 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-log-socket\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726711 4689 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-kubelet\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726720 4689 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726729 4689 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-node-log\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726737 4689 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-slash\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726745 4689 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-systemd-units\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726753 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkjb8\" (UniqueName: \"kubernetes.io/projected/5206d70b-3d3b-404c-b969-713242a23d38-kube-api-access-qkjb8\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726761 4689 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5206d70b-3d3b-404c-b969-713242a23d38-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726769 4689 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726777 4689 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-run-systemd\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726785 4689 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5206d70b-3d3b-404c-b969-713242a23d38-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726792 4689 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.726800 4689 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5206d70b-3d3b-404c-b969-713242a23d38-host-cni-bin\") on node \"crc\" DevicePath \"\""
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.729263 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-ovn-node-metrics-cert\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.745271 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9nzk\" (UniqueName: \"kubernetes.io/projected/6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce-kube-api-access-b9nzk\") pod \"ovnkube-node-fjpqb\" (UID: \"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce\") " pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.918957 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.967668 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"40a37fd74355c8f2658b02429a44a16ec439f8fb9d5cf581871722db2b313f23"}
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.972869 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-acl-logging/0.log"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975051 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-jpm9c_5206d70b-3d3b-404c-b969-713242a23d38/ovn-controller/0.log"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975626 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d" exitCode=0
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975663 4689 generic.go:334] "Generic (PLEG): container finished" podID="5206d70b-3d3b-404c-b969-713242a23d38" containerID="507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029" exitCode=0
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975697 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975741 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"}
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975797 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"}
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975811 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-jpm9c" event={"ID":"5206d70b-3d3b-404c-b969-713242a23d38","Type":"ContainerDied","Data":"7df3457bf767010ab3e580b904b15f4ac21e21ff213952416ecee3116ae10999"}
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.975843 4689 scope.go:117] "RemoveContainer" containerID="729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.982343 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-xxklh_d5f32f36-d66c-4202-ac54-e81c6d978146/kube-multus/1.log"
Jan 23 11:00:17 crc kubenswrapper[4689]: I0123 11:00:17.982422 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-xxklh" event={"ID":"d5f32f36-d66c-4202-ac54-e81c6d978146","Type":"ContainerStarted","Data":"48bfce90b2448162d4f9f1344867aa6dcf8ce05258ed164833aad8c9279b3ac3"}
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.000081 4689 scope.go:117] "RemoveContainer" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.020828 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jpm9c"]
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.025373 4689 scope.go:117] "RemoveContainer" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.027347 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-jpm9c"]
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.044429 4689 scope.go:117] "RemoveContainer" containerID="0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.066449 4689 scope.go:117] "RemoveContainer" containerID="a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.095504 4689 scope.go:117] "RemoveContainer" containerID="507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.103663 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"]
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.104494 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.107906 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-g6cl8"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.112774 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.112911 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.132742 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsrpg\" (UniqueName: \"kubernetes.io/projected/55938c76-e594-4530-b7ee-0e7f3089063e-kube-api-access-nsrpg\") pod \"obo-prometheus-operator-68bc856cb9-5spsb\" (UID: \"55938c76-e594-4530-b7ee-0e7f3089063e\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.135218 4689 scope.go:117] "RemoveContainer" containerID="6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.152585 4689 scope.go:117] "RemoveContainer" containerID="0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.171626 4689 scope.go:117] "RemoveContainer" containerID="5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.185479 4689 scope.go:117] "RemoveContainer" containerID="729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.186117 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b\": container with ID starting with 729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b not found: ID does not exist" containerID="729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.186178 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"} err="failed to get container status \"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b\": rpc error: code = NotFound desc = could not find container \"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b\": container with ID starting with 729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.186211 4689 scope.go:117] "RemoveContainer" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.186596 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\": container with ID starting with 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 not found: ID does not exist" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.186655 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"} err="failed to get container status \"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\": rpc error: code = NotFound desc = could not find container \"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\": container with ID starting with 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.186689 4689 scope.go:117] "RemoveContainer" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.187002 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\": container with ID starting with b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 not found: ID does not exist" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187022 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"} err="failed to get container status \"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\": rpc error: code = NotFound desc = could not find container \"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\": container with ID starting with b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187035 4689 scope.go:117] "RemoveContainer" containerID="0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.187355 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\": container with ID starting with 0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca not found: ID does not exist" containerID="0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187396 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"} err="failed to get container status \"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\": rpc error: code = NotFound desc = could not find container \"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\": container with ID starting with 0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187418 4689 scope.go:117] "RemoveContainer" containerID="a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.187661 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\": container with ID starting with a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d not found: ID does not exist" containerID="a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187685 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"} err="failed to get container status \"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\": rpc error: code = NotFound desc = could not find container \"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\": container with ID starting with a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187704 4689 scope.go:117] "RemoveContainer" containerID="507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.187920 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\": container with ID starting with 507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029 not found: ID does not exist" containerID="507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187941 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"} err="failed to get container status \"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\": rpc error: code = NotFound desc = could not find container \"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\": container with ID starting with 507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.187954 4689 scope.go:117] "RemoveContainer" containerID="6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.188195 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\": container with ID starting with 6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5 not found: ID does not exist" containerID="6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188217 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"} err="failed to get container status \"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\": rpc error: code = NotFound desc = could not find container \"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\": container with ID starting with 6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188230 4689 scope.go:117] "RemoveContainer" containerID="0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.188457 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\": container with ID starting with 0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74 not found: ID does not exist" containerID="0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188483 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"} err="failed to get container status \"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\": rpc error: code = NotFound desc = could not find container \"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\": container with ID starting with 0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188499 4689 scope.go:117] "RemoveContainer" containerID="5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"
Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.188764 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\": container with ID starting with 5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f not found: ID does not exist" containerID="5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188819 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"} err="failed to get container status \"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\": rpc error: code = NotFound desc = could not find container \"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\": container with ID starting with 5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.188851 4689 scope.go:117] "RemoveContainer" containerID="729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189118 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b"} err="failed to get container status \"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b\": rpc error: code = NotFound desc = could not find container \"729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b\": container with ID starting with 729b15e26a1f5a731716e18b72a58ee7915386938d3fe923fb6d224a7bc7b27b not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189166 4689 scope.go:117] "RemoveContainer" containerID="2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189419 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4"} err="failed to get container status \"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\": rpc error: code = NotFound desc = could not find container \"2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4\": container with ID starting with 2109569e221918c7a73620e2ab80a03a74e5d432d9c1f3dac4f7f935e600d5f4 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189445 4689 scope.go:117] "RemoveContainer" containerID="b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189684 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3"} err="failed to get container status \"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\": rpc error: code = NotFound desc = could not find container \"b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3\": container with ID starting with b7860b4ff52bf4d80f08b8bec18faf18c95b261211587cf41048bd838cd04bd3 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.189711 4689 scope.go:117] "RemoveContainer" containerID="0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190072 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca"} err="failed to get container status \"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\": rpc error: code = NotFound desc = could not find container \"0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca\": container with ID starting with 0397de32f1b33ad6a1ab0360735152d81644db098e44d49401c48f53590b22ca not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190099 4689 scope.go:117] "RemoveContainer" containerID="a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190411 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d"} err="failed to get container status \"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\": rpc error: code = NotFound desc = could not find container \"a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d\": container with ID starting with a4e22624d3af07ff2df75b7c07c859fcc7662aea1d09c8b320adee01ca82965d not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190435 4689 scope.go:117] "RemoveContainer" containerID="507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190737 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029"} err="failed to get container status \"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\": rpc error: code = NotFound desc = could not find container \"507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029\": container with ID starting with 507e806f8dcfab390d460035e619120c184680cbdfe198c98cd5df4039350029 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.190761 4689 scope.go:117] "RemoveContainer" containerID="6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.191043 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5"} err="failed to get container status \"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\": rpc error: code = NotFound desc = could not find container \"6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5\": container with ID starting with 6a5c2c8724956a44f4fcf9ed9ab49f2bf30fef8d3b9dcc99d48fb47cbc8659c5 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.191073 4689 scope.go:117] "RemoveContainer" containerID="0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.191331 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74"} err="failed to get container status \"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\": rpc error: code = NotFound desc = could not find container \"0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74\": container with ID starting with 0976167a924bce12fffc3f73cfbc04fa395c72b4b55bbddbf8bda4eed8bbba74 not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.191350 4689 scope.go:117] "RemoveContainer" containerID="5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.191604 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f"} err="failed to get container status \"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\": rpc error: code = NotFound desc = could not find container \"5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f\": container with ID starting with 5919676cfe8cd577a77427ece68abc51df6cd5106df82be58291a1495b718b2f not found: ID does not exist"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.224608 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk"]
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.225297 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.229462 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.234084 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsrpg\" (UniqueName: \"kubernetes.io/projected/55938c76-e594-4530-b7ee-0e7f3089063e-kube-api-access-nsrpg\") pod \"obo-prometheus-operator-68bc856cb9-5spsb\" (UID: \"55938c76-e594-4530-b7ee-0e7f3089063e\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.235291 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"]
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.236180 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.238071 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-x5dxv"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.254461 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsrpg\" (UniqueName: \"kubernetes.io/projected/55938c76-e594-4530-b7ee-0e7f3089063e-kube-api-access-nsrpg\") pod \"obo-prometheus-operator-68bc856cb9-5spsb\" (UID: \"55938c76-e594-4530-b7ee-0e7f3089063e\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.335347 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.335424 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.335456 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"
Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.335495 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.352782 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-95bv6"] Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.353571 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.355587 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.355589 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-kpzsq" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436440 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436499 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/45cadce8-d2da-450b-9b37-c2a6b2a1c595-observability-operator-tls\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436549 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm6g2\" (UniqueName: \"kubernetes.io/projected/45cadce8-d2da-450b-9b37-c2a6b2a1c595-kube-api-access-pm6g2\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436838 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436933 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.436973 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.441460 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.441588 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.441784 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2805243a-4a45-4dc0-b5a2-91c2163e11b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk\" (UID: \"2805243a-4a45-4dc0-b5a2-91c2163e11b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.446473 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.460102 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cf8299a2-da8d-489e-bc54-1212dd8d3099-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv\" (UID: \"cf8299a2-da8d-489e-bc54-1212dd8d3099\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.469449 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(f3f13d6ddf7cf712e0fc86b263ced3bb3d9ea585ecd2f7cb66c13fe15d6d6e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.469549 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(f3f13d6ddf7cf712e0fc86b263ced3bb3d9ea585ecd2f7cb66c13fe15d6d6e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.469600 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(f3f13d6ddf7cf712e0fc86b263ced3bb3d9ea585ecd2f7cb66c13fe15d6d6e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.469650 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators(55938c76-e594-4530-b7ee-0e7f3089063e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators(55938c76-e594-4530-b7ee-0e7f3089063e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(f3f13d6ddf7cf712e0fc86b263ced3bb3d9ea585ecd2f7cb66c13fe15d6d6e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" podUID="55938c76-e594-4530-b7ee-0e7f3089063e" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.528717 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-7qpjs"] Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.529912 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.533614 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-6xnwz" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.538315 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/45cadce8-d2da-450b-9b37-c2a6b2a1c595-observability-operator-tls\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.538374 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm6g2\" (UniqueName: \"kubernetes.io/projected/45cadce8-d2da-450b-9b37-c2a6b2a1c595-kube-api-access-pm6g2\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.538949 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.542321 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/45cadce8-d2da-450b-9b37-c2a6b2a1c595-observability-operator-tls\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.551494 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.559981 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm6g2\" (UniqueName: \"kubernetes.io/projected/45cadce8-d2da-450b-9b37-c2a6b2a1c595-kube-api-access-pm6g2\") pod \"observability-operator-59bdc8b94-95bv6\" (UID: \"45cadce8-d2da-450b-9b37-c2a6b2a1c595\") " pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.572575 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(fe4649458bbd173e4579fd1dff89a746c5c4e2786ca7ce3196adbb5cca4ea134): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.572638 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(fe4649458bbd173e4579fd1dff89a746c5c4e2786ca7ce3196adbb5cca4ea134): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.572664 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(fe4649458bbd173e4579fd1dff89a746c5c4e2786ca7ce3196adbb5cca4ea134): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.572712 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators(2805243a-4a45-4dc0-b5a2-91c2163e11b4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators(2805243a-4a45-4dc0-b5a2-91c2163e11b4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(fe4649458bbd173e4579fd1dff89a746c5c4e2786ca7ce3196adbb5cca4ea134): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" podUID="2805243a-4a45-4dc0-b5a2-91c2163e11b4" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.586529 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(a2d531c167f9c8d03dd316cc69dd6dde98f8d436609d5fd9de4f790357997b94): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.586643 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(a2d531c167f9c8d03dd316cc69dd6dde98f8d436609d5fd9de4f790357997b94): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.586730 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(a2d531c167f9c8d03dd316cc69dd6dde98f8d436609d5fd9de4f790357997b94): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.586821 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators(cf8299a2-da8d-489e-bc54-1212dd8d3099)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators(cf8299a2-da8d-489e-bc54-1212dd8d3099)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(a2d531c167f9c8d03dd316cc69dd6dde98f8d436609d5fd9de4f790357997b94): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" podUID="cf8299a2-da8d-489e-bc54-1212dd8d3099" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.639963 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/a13e2123-3780-4c13-b8a4-760d31e5636e-openshift-service-ca\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.640347 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stwqs\" (UniqueName: \"kubernetes.io/projected/a13e2123-3780-4c13-b8a4-760d31e5636e-kube-api-access-stwqs\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.689965 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.715987 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(8ac7abd47360f2ccbfe260435e71c2764ef6fd21980870750669d527b68efd0c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.716056 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(8ac7abd47360f2ccbfe260435e71c2764ef6fd21980870750669d527b68efd0c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.716077 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(8ac7abd47360f2ccbfe260435e71c2764ef6fd21980870750669d527b68efd0c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.716123 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-95bv6_openshift-operators(45cadce8-d2da-450b-9b37-c2a6b2a1c595)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-95bv6_openshift-operators(45cadce8-d2da-450b-9b37-c2a6b2a1c595)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(8ac7abd47360f2ccbfe260435e71c2764ef6fd21980870750669d527b68efd0c): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.741572 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/a13e2123-3780-4c13-b8a4-760d31e5636e-openshift-service-ca\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.741687 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stwqs\" (UniqueName: \"kubernetes.io/projected/a13e2123-3780-4c13-b8a4-760d31e5636e-kube-api-access-stwqs\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.743123 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/a13e2123-3780-4c13-b8a4-760d31e5636e-openshift-service-ca\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.757171 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stwqs\" (UniqueName: \"kubernetes.io/projected/a13e2123-3780-4c13-b8a4-760d31e5636e-kube-api-access-stwqs\") pod \"perses-operator-5bf474d74f-7qpjs\" (UID: \"a13e2123-3780-4c13-b8a4-760d31e5636e\") " pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.878497 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.898707 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(83f0254c5f63c3bebc36a657a816d052d090c41c1c262bdd374a756cfa2d7110): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.898763 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(83f0254c5f63c3bebc36a657a816d052d090c41c1c262bdd374a756cfa2d7110): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.898783 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(83f0254c5f63c3bebc36a657a816d052d090c41c1c262bdd374a756cfa2d7110): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:18 crc kubenswrapper[4689]: E0123 11:00:18.898819 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-7qpjs_openshift-operators(a13e2123-3780-4c13-b8a4-760d31e5636e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-7qpjs_openshift-operators(a13e2123-3780-4c13-b8a4-760d31e5636e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(83f0254c5f63c3bebc36a657a816d052d090c41c1c262bdd374a756cfa2d7110): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.989688 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerID="d55d9dcfb587b32c78dab2ddd04348247a013626afcdc50ec0fb4d8bd92e0b2a" exitCode=0 Jan 23 11:00:18 crc kubenswrapper[4689]: I0123 11:00:18.989733 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerDied","Data":"d55d9dcfb587b32c78dab2ddd04348247a013626afcdc50ec0fb4d8bd92e0b2a"} Jan 23 11:00:19 crc kubenswrapper[4689]: I0123 11:00:19.649581 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5206d70b-3d3b-404c-b969-713242a23d38" path="/var/lib/kubelet/pods/5206d70b-3d3b-404c-b969-713242a23d38/volumes" Jan 23 11:00:19 crc kubenswrapper[4689]: I0123 11:00:19.999555 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"438c727fd124c12444f4754330520f86626cf53a1239a59d88b9f34c9cfae2a2"} Jan 23 11:00:19 crc kubenswrapper[4689]: I0123 11:00:19.999910 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"9147bcb1383a9bcf6192dae20b77e45ea226ba9cdd5898b8de1a5e6dc4391201"} Jan 23 11:00:20 crc kubenswrapper[4689]: I0123 11:00:19.999923 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"9290d611fb4cb3a80ced12758e773aa08507317929c940d048fad40872ad25cd"} Jan 23 11:00:20 crc kubenswrapper[4689]: I0123 11:00:19.999935 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"e620d8cbbf9f71e1abe413f7f4709019964254d68306b0e9044d2ea6f3b9c462"} Jan 23 11:00:20 crc kubenswrapper[4689]: I0123 11:00:19.999945 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"e1d54a73c5a61286c88444e02952291bfa28e5c3a20a737d8c7c945b6ef106c9"} Jan 23 11:00:20 crc kubenswrapper[4689]: I0123 11:00:19.999956 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" 
event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"0e75b61ebece9a5e868189d67e367ab6d424a7c23b54d7e2d1d1fdbc33eed530"} Jan 23 11:00:23 crc kubenswrapper[4689]: I0123 11:00:23.022970 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"f4bc40ee434bd4c890f8046ae4c65716ab355785510a0eab9d092fa0b922bece"} Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.050268 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" event={"ID":"6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce","Type":"ContainerStarted","Data":"1b60addfb06dcc58e63991a5f03af627c80aa0736f6ace4ae8cf9fba64c14837"} Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.050804 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.050816 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.050824 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.090221 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.095130 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podStartSLOduration=11.095108037 podStartE2EDuration="11.095108037s" podCreationTimestamp="2026-01-23 11:00:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:00:28.08942981 +0000 UTC m=+692.714109669" watchObservedRunningTime="2026-01-23 11:00:28.095108037 +0000 UTC m=+692.719787896" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.101314 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.610419 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-95bv6"] Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.610903 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.611412 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.627700 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk"] Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.627814 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.628226 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.642579 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"] Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.642683 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.643052 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.649359 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(560661a66f1caaacf191d718019a08494ea0c1c63cfee98c72d08c7274523720): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.649436 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(560661a66f1caaacf191d718019a08494ea0c1c63cfee98c72d08c7274523720): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.649467 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(560661a66f1caaacf191d718019a08494ea0c1c63cfee98c72d08c7274523720): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.649525 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-95bv6_openshift-operators(45cadce8-d2da-450b-9b37-c2a6b2a1c595)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-95bv6_openshift-operators(45cadce8-d2da-450b-9b37-c2a6b2a1c595)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-95bv6_openshift-operators_45cadce8-d2da-450b-9b37-c2a6b2a1c595_0(560661a66f1caaacf191d718019a08494ea0c1c63cfee98c72d08c7274523720): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.664933 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"] Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.665066 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.665807 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.678824 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-7qpjs"] Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.678950 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:28 crc kubenswrapper[4689]: I0123 11:00:28.679460 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.711038 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(6f411ae88c3cf03372a03b422f9358e47c98e179280dba7e7bd688ee033d998b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.711100 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(6f411ae88c3cf03372a03b422f9358e47c98e179280dba7e7bd688ee033d998b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.711123 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(6f411ae88c3cf03372a03b422f9358e47c98e179280dba7e7bd688ee033d998b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.711178 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators(2805243a-4a45-4dc0-b5a2-91c2163e11b4)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators(2805243a-4a45-4dc0-b5a2-91c2163e11b4)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_openshift-operators_2805243a-4a45-4dc0-b5a2-91c2163e11b4_0(6f411ae88c3cf03372a03b422f9358e47c98e179280dba7e7bd688ee033d998b): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" podUID="2805243a-4a45-4dc0-b5a2-91c2163e11b4" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.739313 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(83aab66af8fd272af6e5d79ea20f3bab7741764f739b0f2086edbffccc65926e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.739381 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(83aab66af8fd272af6e5d79ea20f3bab7741764f739b0f2086edbffccc65926e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.739401 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(83aab66af8fd272af6e5d79ea20f3bab7741764f739b0f2086edbffccc65926e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.739438 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators(55938c76-e594-4530-b7ee-0e7f3089063e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators(55938c76-e594-4530-b7ee-0e7f3089063e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-5spsb_openshift-operators_55938c76-e594-4530-b7ee-0e7f3089063e_0(83aab66af8fd272af6e5d79ea20f3bab7741764f739b0f2086edbffccc65926e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" podUID="55938c76-e594-4530-b7ee-0e7f3089063e" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.750537 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(6c6cb6b8d7834ac9b0aa7f41c57557e049d4a654456cffc9487270415835045f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.750588 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(6c6cb6b8d7834ac9b0aa7f41c57557e049d4a654456cffc9487270415835045f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.750603 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(6c6cb6b8d7834ac9b0aa7f41c57557e049d4a654456cffc9487270415835045f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.750636 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators(cf8299a2-da8d-489e-bc54-1212dd8d3099)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators(cf8299a2-da8d-489e-bc54-1212dd8d3099)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_openshift-operators_cf8299a2-da8d-489e-bc54-1212dd8d3099_0(6c6cb6b8d7834ac9b0aa7f41c57557e049d4a654456cffc9487270415835045f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" podUID="cf8299a2-da8d-489e-bc54-1212dd8d3099" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.758995 4689 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(97878687e195d8d7fd957f639bcd4329e844d88fc301616fca0fa32c4e0da4fd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.759040 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(97878687e195d8d7fd957f639bcd4329e844d88fc301616fca0fa32c4e0da4fd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.759059 4689 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(97878687e195d8d7fd957f639bcd4329e844d88fc301616fca0fa32c4e0da4fd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:28 crc kubenswrapper[4689]: E0123 11:00:28.759089 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-7qpjs_openshift-operators(a13e2123-3780-4c13-b8a4-760d31e5636e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-7qpjs_openshift-operators(a13e2123-3780-4c13-b8a4-760d31e5636e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-7qpjs_openshift-operators_a13e2123-3780-4c13-b8a4-760d31e5636e_0(97878687e195d8d7fd957f639bcd4329e844d88fc301616fca0fa32c4e0da4fd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" Jan 23 11:00:33 crc kubenswrapper[4689]: I0123 11:00:33.310601 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:00:33 crc kubenswrapper[4689]: I0123 11:00:33.310867 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:00:38 crc kubenswrapper[4689]: I0123 11:00:38.571883 4689 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.639429 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.639507 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.640656 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.640871 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.884102 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-95bv6"] Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.892374 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:00:41 crc kubenswrapper[4689]: I0123 11:00:41.933705 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv"] Jan 23 11:00:41 crc kubenswrapper[4689]: W0123 11:00:41.937886 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf8299a2_da8d_489e_bc54_1212dd8d3099.slice/crio-9d8845b549137d4bc6185d207f3800b7f058c658ef3e9deeffc8beaccb0da045 WatchSource:0}: Error finding container 9d8845b549137d4bc6185d207f3800b7f058c658ef3e9deeffc8beaccb0da045: Status 404 returned error can't find the container with id 9d8845b549137d4bc6185d207f3800b7f058c658ef3e9deeffc8beaccb0da045 Jan 23 11:00:42 crc kubenswrapper[4689]: I0123 11:00:42.566110 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" event={"ID":"45cadce8-d2da-450b-9b37-c2a6b2a1c595","Type":"ContainerStarted","Data":"3d51b76b2916408994434dd167bbef28a3751e3b1a1ad3b34f632db071a596b1"} Jan 23 11:00:42 crc kubenswrapper[4689]: I0123 11:00:42.567924 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" event={"ID":"cf8299a2-da8d-489e-bc54-1212dd8d3099","Type":"ContainerStarted","Data":"9d8845b549137d4bc6185d207f3800b7f058c658ef3e9deeffc8beaccb0da045"} Jan 23 11:00:42 crc kubenswrapper[4689]: I0123 11:00:42.639860 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:42 crc kubenswrapper[4689]: I0123 11:00:42.640448 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" Jan 23 11:00:42 crc kubenswrapper[4689]: I0123 11:00:42.873223 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk"] Jan 23 11:00:42 crc kubenswrapper[4689]: W0123 11:00:42.879038 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2805243a_4a45_4dc0_b5a2_91c2163e11b4.slice/crio-b78276189b943b6917615009ec98b09ee56876336f50777b452702de33601479 WatchSource:0}: Error finding container b78276189b943b6917615009ec98b09ee56876336f50777b452702de33601479: Status 404 returned error can't find the container with id b78276189b943b6917615009ec98b09ee56876336f50777b452702de33601479 Jan 23 11:00:43 crc kubenswrapper[4689]: I0123 11:00:43.576048 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" event={"ID":"2805243a-4a45-4dc0-b5a2-91c2163e11b4","Type":"ContainerStarted","Data":"b78276189b943b6917615009ec98b09ee56876336f50777b452702de33601479"} Jan 23 11:00:43 crc kubenswrapper[4689]: I0123 11:00:43.639831 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:43 crc kubenswrapper[4689]: I0123 11:00:43.640572 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:44 crc kubenswrapper[4689]: I0123 11:00:44.048210 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-7qpjs"] Jan 23 11:00:44 crc kubenswrapper[4689]: W0123 11:00:44.068322 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda13e2123_3780_4c13_b8a4_760d31e5636e.slice/crio-9ba6afe709ff2b9199ddb46f71a0c4507388b53bbef066846a5095652a28bcc5 WatchSource:0}: Error finding container 9ba6afe709ff2b9199ddb46f71a0c4507388b53bbef066846a5095652a28bcc5: Status 404 returned error can't find the container with id 9ba6afe709ff2b9199ddb46f71a0c4507388b53bbef066846a5095652a28bcc5 Jan 23 11:00:44 crc kubenswrapper[4689]: I0123 11:00:44.590574 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" event={"ID":"a13e2123-3780-4c13-b8a4-760d31e5636e","Type":"ContainerStarted","Data":"9ba6afe709ff2b9199ddb46f71a0c4507388b53bbef066846a5095652a28bcc5"} Jan 23 11:00:44 crc kubenswrapper[4689]: I0123 11:00:44.639551 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:44 crc kubenswrapper[4689]: I0123 11:00:44.640594 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" Jan 23 11:00:45 crc kubenswrapper[4689]: I0123 11:00:45.981592 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb"] Jan 23 11:00:47 crc kubenswrapper[4689]: W0123 11:00:47.368746 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55938c76_e594_4530_b7ee_0e7f3089063e.slice/crio-c4441ffb575698e4c040cecbe3c8d84aad62a1cb20c13592d2c17335f0ea8316 WatchSource:0}: Error finding container c4441ffb575698e4c040cecbe3c8d84aad62a1cb20c13592d2c17335f0ea8316: Status 404 returned error can't find the container with id c4441ffb575698e4c040cecbe3c8d84aad62a1cb20c13592d2c17335f0ea8316 Jan 23 11:00:47 crc kubenswrapper[4689]: I0123 11:00:47.627820 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" event={"ID":"55938c76-e594-4530-b7ee-0e7f3089063e","Type":"ContainerStarted","Data":"c4441ffb575698e4c040cecbe3c8d84aad62a1cb20c13592d2c17335f0ea8316"} Jan 23 11:00:47 crc kubenswrapper[4689]: I0123 11:00:47.948689 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.638760 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" event={"ID":"a13e2123-3780-4c13-b8a4-760d31e5636e","Type":"ContainerStarted","Data":"a132c063dbf0fb771f9ba01eff3176980291f57e5672f78b21c6ca61959474b5"} Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.640085 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:48 crc kubenswrapper[4689]: 
I0123 11:00:48.642712 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" event={"ID":"45cadce8-d2da-450b-9b37-c2a6b2a1c595","Type":"ContainerStarted","Data":"69e3263f615b7a5c939fd2604f8e31d807c6746b23f541b888f7e11cf09c7d2e"} Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.643468 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.644459 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" event={"ID":"cf8299a2-da8d-489e-bc54-1212dd8d3099","Type":"ContainerStarted","Data":"4278bdc310cad88a18b80adb34bdd28bafab795950850cf69d4801840dcf4a0a"} Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.646506 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" event={"ID":"2805243a-4a45-4dc0-b5a2-91c2163e11b4","Type":"ContainerStarted","Data":"61b13d64732d4a8e59fbebaf97fc14947f7f2f72979a2dba33fc28406725ec18"} Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.647010 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.660119 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podStartSLOduration=26.857405355 podStartE2EDuration="30.660097635s" podCreationTimestamp="2026-01-23 11:00:18 +0000 UTC" firstStartedPulling="2026-01-23 11:00:44.070893507 +0000 UTC m=+708.695573366" lastFinishedPulling="2026-01-23 11:00:47.873585787 +0000 UTC m=+712.498265646" observedRunningTime="2026-01-23 11:00:48.655047705 +0000 UTC m=+713.279727584" watchObservedRunningTime="2026-01-23 11:00:48.660097635 +0000 UTC m=+713.284777494" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.702695 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-qndbv" podStartSLOduration=24.872576666 podStartE2EDuration="30.70267824s" podCreationTimestamp="2026-01-23 11:00:18 +0000 UTC" firstStartedPulling="2026-01-23 11:00:41.939532326 +0000 UTC m=+706.564212185" lastFinishedPulling="2026-01-23 11:00:47.7696339 +0000 UTC m=+712.394313759" observedRunningTime="2026-01-23 11:00:48.701497709 +0000 UTC m=+713.326177568" watchObservedRunningTime="2026-01-23 11:00:48.70267824 +0000 UTC m=+713.327358099" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.705864 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk" podStartSLOduration=25.746174004 podStartE2EDuration="30.705853032s" podCreationTimestamp="2026-01-23 11:00:18 +0000 UTC" firstStartedPulling="2026-01-23 11:00:42.882390731 +0000 UTC m=+707.507070590" lastFinishedPulling="2026-01-23 11:00:47.842069759 +0000 UTC m=+712.466749618" observedRunningTime="2026-01-23 11:00:48.676984513 +0000 UTC m=+713.301664402" watchObservedRunningTime="2026-01-23 11:00:48.705853032 +0000 UTC m=+713.330532891" Jan 23 11:00:48 crc kubenswrapper[4689]: I0123 11:00:48.724470 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podStartSLOduration=24.73437768 podStartE2EDuration="30.724453694s" podCreationTimestamp="2026-01-23 11:00:18 +0000 UTC" firstStartedPulling="2026-01-23 11:00:41.892135606 +0000 UTC m=+706.516815465" lastFinishedPulling="2026-01-23 11:00:47.88221162 +0000 UTC m=+712.506891479" observedRunningTime="2026-01-23 11:00:48.723662105 +0000 UTC m=+713.348341964" watchObservedRunningTime="2026-01-23 11:00:48.724453694 +0000 UTC m=+713.349133553" Jan 23 11:00:50 crc kubenswrapper[4689]: I0123 11:00:50.667185 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" event={"ID":"55938c76-e594-4530-b7ee-0e7f3089063e","Type":"ContainerStarted","Data":"06b9c5108b4d1fce558a5040ae61004a87d422411598c762c4d81787a4c36c09"} Jan 23 11:00:50 crc kubenswrapper[4689]: I0123 11:00:50.693479 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-5spsb" podStartSLOduration=30.119706438 podStartE2EDuration="32.693439401s" podCreationTimestamp="2026-01-23 11:00:18 +0000 UTC" firstStartedPulling="2026-01-23 11:00:47.376173135 +0000 UTC m=+712.000853004" lastFinishedPulling="2026-01-23 11:00:49.949906098 +0000 UTC m=+714.574585967" observedRunningTime="2026-01-23 11:00:50.686102682 +0000 UTC m=+715.310782551" watchObservedRunningTime="2026-01-23 11:00:50.693439401 +0000 UTC m=+715.318119270" Jan 23 11:00:58 crc kubenswrapper[4689]: I0123 11:00:58.882042 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.128734 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.129849 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.132218 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.132270 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.132906 4689 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-phxpb" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.135634 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-ksh87"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.138563 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-858654f9db-ksh87" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.144996 4689 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-ghqpt" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.147689 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.163896 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-ksh87"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.177289 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-pw8pk"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.178407 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.180670 4689 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mgg88" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.185599 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsb86\" (UniqueName: \"kubernetes.io/projected/13e7690f-c133-46d4-8a38-f4238d3cf4cc-kube-api-access-qsb86\") pod \"cert-manager-858654f9db-ksh87\" (UID: \"13e7690f-c133-46d4-8a38-f4238d3cf4cc\") " pod="cert-manager/cert-manager-858654f9db-ksh87" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.185719 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl96s\" (UniqueName: \"kubernetes.io/projected/6395a95b-6677-4801-9026-34268f7bdf2a-kube-api-access-wl96s\") pod \"cert-manager-cainjector-cf98fcc89-cfnhv\" (UID: \"6395a95b-6677-4801-9026-34268f7bdf2a\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.186654 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-pw8pk"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.286662 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsb86\" (UniqueName: \"kubernetes.io/projected/13e7690f-c133-46d4-8a38-f4238d3cf4cc-kube-api-access-qsb86\") pod \"cert-manager-858654f9db-ksh87\" (UID: \"13e7690f-c133-46d4-8a38-f4238d3cf4cc\") " pod="cert-manager/cert-manager-858654f9db-ksh87" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.286776 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvqxl\" (UniqueName: \"kubernetes.io/projected/7496161e-1841-4a0e-ac40-e157bbfd9520-kube-api-access-gvqxl\") pod \"cert-manager-webhook-687f57d79b-pw8pk\" (UID: \"7496161e-1841-4a0e-ac40-e157bbfd9520\") " pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.286824 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl96s\" (UniqueName: \"kubernetes.io/projected/6395a95b-6677-4801-9026-34268f7bdf2a-kube-api-access-wl96s\") pod \"cert-manager-cainjector-cf98fcc89-cfnhv\" (UID: \"6395a95b-6677-4801-9026-34268f7bdf2a\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.304985 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsb86\" (UniqueName: \"kubernetes.io/projected/13e7690f-c133-46d4-8a38-f4238d3cf4cc-kube-api-access-qsb86\") pod \"cert-manager-858654f9db-ksh87\" (UID: \"13e7690f-c133-46d4-8a38-f4238d3cf4cc\") " pod="cert-manager/cert-manager-858654f9db-ksh87" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.305009 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl96s\" (UniqueName: \"kubernetes.io/projected/6395a95b-6677-4801-9026-34268f7bdf2a-kube-api-access-wl96s\") pod \"cert-manager-cainjector-cf98fcc89-cfnhv\" (UID: \"6395a95b-6677-4801-9026-34268f7bdf2a\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.387724 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvqxl\" (UniqueName: \"kubernetes.io/projected/7496161e-1841-4a0e-ac40-e157bbfd9520-kube-api-access-gvqxl\") pod \"cert-manager-webhook-687f57d79b-pw8pk\" (UID: \"7496161e-1841-4a0e-ac40-e157bbfd9520\") " pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.405386 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvqxl\" (UniqueName: \"kubernetes.io/projected/7496161e-1841-4a0e-ac40-e157bbfd9520-kube-api-access-gvqxl\") pod \"cert-manager-webhook-687f57d79b-pw8pk\" (UID: \"7496161e-1841-4a0e-ac40-e157bbfd9520\") " pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.451845 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.463960 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-ksh87" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.497912 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.721198 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv"] Jan 23 11:00:59 crc kubenswrapper[4689]: W0123 11:00:59.725349 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6395a95b_6677_4801_9026_34268f7bdf2a.slice/crio-fd20b86b40145c058286b0905ac66376f629d127135ab6f45180b2346c80d4bd WatchSource:0}: Error finding container fd20b86b40145c058286b0905ac66376f629d127135ab6f45180b2346c80d4bd: Status 404 returned error can't find the container with id fd20b86b40145c058286b0905ac66376f629d127135ab6f45180b2346c80d4bd Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.970611 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-pw8pk"] Jan 23 11:00:59 crc kubenswrapper[4689]: I0123 11:00:59.975743 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-ksh87"] Jan 23 11:00:59 crc kubenswrapper[4689]: W0123 11:00:59.978190 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13e7690f_c133_46d4_8a38_f4238d3cf4cc.slice/crio-396ed0275875f8b2651da1f3b2b582a881f5cd20dd450042c985a070b28f68c7 WatchSource:0}: Error finding container 396ed0275875f8b2651da1f3b2b582a881f5cd20dd450042c985a070b28f68c7: Status 404 returned error can't find the container with id 396ed0275875f8b2651da1f3b2b582a881f5cd20dd450042c985a070b28f68c7 Jan 23 11:01:00 crc kubenswrapper[4689]: I0123 11:01:00.736193 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-ksh87" event={"ID":"13e7690f-c133-46d4-8a38-f4238d3cf4cc","Type":"ContainerStarted","Data":"396ed0275875f8b2651da1f3b2b582a881f5cd20dd450042c985a070b28f68c7"} Jan 23 11:01:00 crc kubenswrapper[4689]: I0123 11:01:00.740276 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" event={"ID":"7496161e-1841-4a0e-ac40-e157bbfd9520","Type":"ContainerStarted","Data":"aa2fc8d04163b69d5cddc552b4796a5715652b637b764b79f4a1e0fd56d3a603"} Jan 23 11:01:00 crc kubenswrapper[4689]: I0123 11:01:00.746499 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" event={"ID":"6395a95b-6677-4801-9026-34268f7bdf2a","Type":"ContainerStarted","Data":"fd20b86b40145c058286b0905ac66376f629d127135ab6f45180b2346c80d4bd"} Jan 23 11:01:03 crc kubenswrapper[4689]: I0123 11:01:03.311470 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:01:03 crc kubenswrapper[4689]: I0123 11:01:03.311857 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:01:09 crc kubenswrapper[4689]: I0123 11:01:09.820688 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" event={"ID":"6395a95b-6677-4801-9026-34268f7bdf2a","Type":"ContainerStarted","Data":"263c8389a73e100ce7cf969725e50e340b78a6bfb3bbf77083c83f99dd690af2"} Jan 23 11:01:09 crc kubenswrapper[4689]: I0123 11:01:09.838439 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-cfnhv" podStartSLOduration=1.875877675 podStartE2EDuration="10.83841992s" podCreationTimestamp="2026-01-23 11:00:59 +0000 UTC" firstStartedPulling="2026-01-23 11:00:59.727590329 +0000 UTC m=+724.352270188" lastFinishedPulling="2026-01-23 11:01:08.690132584 +0000 UTC m=+733.314812433" observedRunningTime="2026-01-23 11:01:09.838369218 +0000 UTC m=+734.463049067" watchObservedRunningTime="2026-01-23 11:01:09.83841992 +0000 UTC m=+734.463099779" Jan 23 11:01:10 crc kubenswrapper[4689]: I0123 11:01:10.828726 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-ksh87" event={"ID":"13e7690f-c133-46d4-8a38-f4238d3cf4cc","Type":"ContainerStarted","Data":"9cc8589d10d7fef58be2972f62ce7e413e987b280bb40a111e258fa9eaccf901"} Jan 23 11:01:10 crc kubenswrapper[4689]: I0123 11:01:10.831043 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" event={"ID":"7496161e-1841-4a0e-ac40-e157bbfd9520","Type":"ContainerStarted","Data":"bb48cdc5f71f729324f109aab86257cc796e96474c15d85369d43884355d25f7"} Jan 23 11:01:10 crc kubenswrapper[4689]: I0123 11:01:10.852667 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-ksh87" podStartSLOduration=1.278382734 podStartE2EDuration="11.852650632s" podCreationTimestamp="2026-01-23 11:00:59 +0000 UTC" firstStartedPulling="2026-01-23 11:00:59.979571162 +0000 UTC m=+724.604251021" lastFinishedPulling="2026-01-23 11:01:10.55383906 +0000 UTC m=+735.178518919" observedRunningTime="2026-01-23 11:01:10.844041102 +0000 UTC m=+735.468720991" watchObservedRunningTime="2026-01-23 11:01:10.852650632 +0000 UTC m=+735.477330491" Jan 23 11:01:11 crc kubenswrapper[4689]: I0123 11:01:11.838697 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.754235 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podStartSLOduration=6.234186913 podStartE2EDuration="16.754215963s" podCreationTimestamp="2026-01-23 11:00:59 +0000 UTC" firstStartedPulling="2026-01-23 11:00:59.975536908 +0000 UTC m=+724.600216767" lastFinishedPulling="2026-01-23 11:01:10.495565948 +0000 UTC m=+735.120245817" observedRunningTime="2026-01-23 11:01:10.875799535 +0000 UTC m=+735.500479444" watchObservedRunningTime="2026-01-23 11:01:15.754215963 +0000 UTC m=+740.378895832" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.757203 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.758604 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.786783 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.854647 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.854745 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g5fj\" (UniqueName: \"kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.854798 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.955836 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.955937 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g5fj\" (UniqueName: \"kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.955994 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.957864 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:15 crc kubenswrapper[4689]: I0123 11:01:15.958218 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.004921 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9g5fj\" (UniqueName: \"kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj\") pod \"redhat-marketplace-pwljm\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.135430 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.365136 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.875992 4689 generic.go:334] "Generic (PLEG): container finished" podID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerID="04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e" exitCode=0 Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.876082 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerDied","Data":"04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e"} Jan 23 11:01:16 crc kubenswrapper[4689]: I0123 11:01:16.876404 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerStarted","Data":"81b94b1bc923f3382b59aef726c1225513bd3b552984c68522b0cfee3c0cc216"} Jan 23 11:01:17 crc kubenswrapper[4689]: I0123 11:01:17.888774 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerStarted","Data":"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1"} Jan 23 11:01:18 crc kubenswrapper[4689]: I0123 11:01:18.898925 4689 generic.go:334] "Generic (PLEG): container finished" podID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerID="4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1" exitCode=0 Jan 23 11:01:18 crc kubenswrapper[4689]: I0123 11:01:18.898984 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerDied","Data":"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1"} Jan 23 11:01:19 crc kubenswrapper[4689]: I0123 11:01:19.501136 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" Jan 23 11:01:19 crc kubenswrapper[4689]: I0123 11:01:19.915389 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerStarted","Data":"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a"} Jan 23 11:01:19 crc kubenswrapper[4689]: I0123 11:01:19.937270 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pwljm" podStartSLOduration=2.505007458 podStartE2EDuration="4.937251601s" podCreationTimestamp="2026-01-23 11:01:15 +0000 UTC" firstStartedPulling="2026-01-23 11:01:16.879974181 +0000 UTC m=+741.504654070" lastFinishedPulling="2026-01-23 11:01:19.312218354 +0000 UTC m=+743.936898213" observedRunningTime="2026-01-23 11:01:19.933143576 +0000 UTC m=+744.557823445" watchObservedRunningTime="2026-01-23 
11:01:19.937251601 +0000 UTC m=+744.561931470" Jan 23 11:01:26 crc kubenswrapper[4689]: I0123 11:01:26.135780 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:26 crc kubenswrapper[4689]: I0123 11:01:26.136512 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:26 crc kubenswrapper[4689]: I0123 11:01:26.175200 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:27 crc kubenswrapper[4689]: I0123 11:01:27.014249 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:27 crc kubenswrapper[4689]: I0123 11:01:27.073673 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:28 crc kubenswrapper[4689]: I0123 11:01:28.984565 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pwljm" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="registry-server" containerID="cri-o://8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a" gracePeriod=2 Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.922182 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.971418 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities\") pod \"0c438d31-4fa0-4142-8634-838d7ecfa985\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.971495 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g5fj\" (UniqueName: \"kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj\") pod \"0c438d31-4fa0-4142-8634-838d7ecfa985\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.971618 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content\") pod \"0c438d31-4fa0-4142-8634-838d7ecfa985\" (UID: \"0c438d31-4fa0-4142-8634-838d7ecfa985\") " Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.973710 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities" (OuterVolumeSpecName: "utilities") pod "0c438d31-4fa0-4142-8634-838d7ecfa985" (UID: "0c438d31-4fa0-4142-8634-838d7ecfa985"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.983630 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj" (OuterVolumeSpecName: "kube-api-access-9g5fj") pod "0c438d31-4fa0-4142-8634-838d7ecfa985" (UID: "0c438d31-4fa0-4142-8634-838d7ecfa985"). InnerVolumeSpecName "kube-api-access-9g5fj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993358 4689 generic.go:334] "Generic (PLEG): container finished" podID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerID="8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a" exitCode=0 Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993378 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c438d31-4fa0-4142-8634-838d7ecfa985" (UID: "0c438d31-4fa0-4142-8634-838d7ecfa985"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993414 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerDied","Data":"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a"} Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993441 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwljm" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993466 4689 scope.go:117] "RemoveContainer" containerID="8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a" Jan 23 11:01:29 crc kubenswrapper[4689]: I0123 11:01:29.993451 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwljm" event={"ID":"0c438d31-4fa0-4142-8634-838d7ecfa985","Type":"ContainerDied","Data":"81b94b1bc923f3382b59aef726c1225513bd3b552984c68522b0cfee3c0cc216"} Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.021960 4689 scope.go:117] "RemoveContainer" containerID="4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.034084 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.038885 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwljm"] Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.061382 4689 scope.go:117] "RemoveContainer" containerID="04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.073262 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g5fj\" (UniqueName: \"kubernetes.io/projected/0c438d31-4fa0-4142-8634-838d7ecfa985-kube-api-access-9g5fj\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.073313 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.073331 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c438d31-4fa0-4142-8634-838d7ecfa985-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.079140 4689 scope.go:117] "RemoveContainer" containerID="8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a" Jan 23 11:01:30 crc kubenswrapper[4689]: E0123 11:01:30.079598 4689 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a\": container with ID starting with 8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a not found: ID does not exist" containerID="8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.079637 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a"} err="failed to get container status \"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a\": rpc error: code = NotFound desc = could not find container \"8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a\": container with ID starting with 8982203bbdd9a108d4923110586ecd8dde83557cbe22a8982f03171ec067f81a not found: ID does not exist" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.079660 4689 scope.go:117] "RemoveContainer" containerID="4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1" Jan 23 11:01:30 crc kubenswrapper[4689]: E0123 11:01:30.080020 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1\": container with ID starting with 4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1 not found: ID does not exist" containerID="4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.080047 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1"} err="failed to get container status \"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1\": rpc error: code = NotFound desc = could not find container \"4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1\": container with ID starting with 4ed04d05fe406901b4182d797c5ec8b8a5ce3c446f0f1690ba048fffc7351eb1 not found: ID does not exist" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.080063 4689 scope.go:117] "RemoveContainer" containerID="04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e" Jan 23 11:01:30 crc kubenswrapper[4689]: E0123 11:01:30.080476 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e\": container with ID starting with 04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e not found: ID does not exist" containerID="04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e" Jan 23 11:01:30 crc kubenswrapper[4689]: I0123 11:01:30.080517 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e"} err="failed to get container status \"04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e\": rpc error: code = NotFound desc = could not find container \"04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e\": container with ID starting with 04b6e4c97c0ec023f360490041b89ba368ab784f146cc2ca61e417701097e87e not found: ID does not exist" Jan 23 11:01:31 crc kubenswrapper[4689]: I0123 11:01:31.654298 4689 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" path="/var/lib/kubelet/pods/0c438d31-4fa0-4142-8634-838d7ecfa985/volumes" Jan 23 11:01:33 crc kubenswrapper[4689]: I0123 11:01:33.311793 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:01:33 crc kubenswrapper[4689]: I0123 11:01:33.311918 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:01:33 crc kubenswrapper[4689]: I0123 11:01:33.312003 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:01:33 crc kubenswrapper[4689]: I0123 11:01:33.312967 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:01:33 crc kubenswrapper[4689]: I0123 11:01:33.313065 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf" gracePeriod=600 Jan 23 11:01:34 crc kubenswrapper[4689]: I0123 11:01:34.024937 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf" exitCode=0 Jan 23 11:01:34 crc kubenswrapper[4689]: I0123 11:01:34.025017 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf"} Jan 23 11:01:34 crc kubenswrapper[4689]: I0123 11:01:34.025550 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d"} Jan 23 11:01:34 crc kubenswrapper[4689]: I0123 11:01:34.025588 4689 scope.go:117] "RemoveContainer" containerID="a4159ab9ef2d5639be486fab68cc378ce37a59b9cc85de6b3699c4833d973bdb" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.530465 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd"] Jan 23 11:01:47 crc kubenswrapper[4689]: E0123 11:01:47.531220 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="extract-content" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.531236 4689 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="extract-content" Jan 23 11:01:47 crc kubenswrapper[4689]: E0123 11:01:47.531251 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="extract-utilities" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.531258 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="extract-utilities" Jan 23 11:01:47 crc kubenswrapper[4689]: E0123 11:01:47.531269 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="registry-server" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.531276 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="registry-server" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.531402 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c438d31-4fa0-4142-8634-838d7ecfa985" containerName="registry-server" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.532214 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.536006 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.552542 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd"] Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.559608 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.559689 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4b6f\" (UniqueName: \"kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.559799 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.661334 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " 
pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.661390 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.661421 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4b6f\" (UniqueName: \"kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.661935 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.661996 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.688713 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4b6f\" (UniqueName: \"kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:47 crc kubenswrapper[4689]: I0123 11:01:47.861018 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:48 crc kubenswrapper[4689]: I0123 11:01:48.286334 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd"] Jan 23 11:01:49 crc kubenswrapper[4689]: I0123 11:01:49.168845 4689 generic.go:334] "Generic (PLEG): container finished" podID="007f3542-55a2-495b-a618-6933e425c7c3" containerID="461ab4f66f9b5488cf29bbf92a555ada0c7e09012233294f926a565fc6a08161" exitCode=0 Jan 23 11:01:49 crc kubenswrapper[4689]: I0123 11:01:49.168936 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" event={"ID":"007f3542-55a2-495b-a618-6933e425c7c3","Type":"ContainerDied","Data":"461ab4f66f9b5488cf29bbf92a555ada0c7e09012233294f926a565fc6a08161"} Jan 23 11:01:49 crc kubenswrapper[4689]: I0123 11:01:49.169199 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" event={"ID":"007f3542-55a2-495b-a618-6933e425c7c3","Type":"ContainerStarted","Data":"8914304e20c4d9c09cdce438d93a2f13b6469e4ada86fd6fc1620c9b62751732"} Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.330875 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp"] Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.332648 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.335270 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp"] Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.406457 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.406515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2rqs\" (UniqueName: \"kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.406686 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.508035 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.508120 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2rqs\" (UniqueName: \"kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.508230 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.508998 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.509043 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.527034 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2rqs\" (UniqueName: \"kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.663970 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:50 crc kubenswrapper[4689]: I0123 11:01:50.832661 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp"] Jan 23 11:01:50 crc kubenswrapper[4689]: W0123 11:01:50.838132 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6b7cf7d_3b83_4abb_8ca1_9b623c85917c.slice/crio-7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b WatchSource:0}: Error finding container 7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b: Status 404 returned error can't find the container with id 7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.202677 4689 generic.go:334] "Generic (PLEG): container finished" podID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerID="23948ebb76ea2da617b8949ef5413457f7630c9e4923281a627260ae6b1236f4" exitCode=0 Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.202747 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" event={"ID":"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c","Type":"ContainerDied","Data":"23948ebb76ea2da617b8949ef5413457f7630c9e4923281a627260ae6b1236f4"} Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.203141 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" event={"ID":"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c","Type":"ContainerStarted","Data":"7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b"} Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.205711 4689 generic.go:334] "Generic (PLEG): container finished" podID="007f3542-55a2-495b-a618-6933e425c7c3" containerID="032ea432f171450ec13242a1b8390010933cc64044303c882c195c3fad190f60" exitCode=0 Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.205779 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" event={"ID":"007f3542-55a2-495b-a618-6933e425c7c3","Type":"ContainerDied","Data":"032ea432f171450ec13242a1b8390010933cc64044303c882c195c3fad190f60"} Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.861496 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.862726 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.882718 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.933633 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.933695 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:51 crc kubenswrapper[4689]: I0123 11:01:51.933722 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv6sp\" (UniqueName: \"kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.034805 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.034868 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.034905 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv6sp\" (UniqueName: \"kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.035369 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.035424 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.052893 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jv6sp\" (UniqueName: \"kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp\") pod \"redhat-operators-sjmnf\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.190020 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.213669 4689 generic.go:334] "Generic (PLEG): container finished" podID="007f3542-55a2-495b-a618-6933e425c7c3" containerID="9f4c5aa64eb4e23c0c6be64907de6c3803329415794784f65a8b30b254821969" exitCode=0 Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.213716 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" event={"ID":"007f3542-55a2-495b-a618-6933e425c7c3","Type":"ContainerDied","Data":"9f4c5aa64eb4e23c0c6be64907de6c3803329415794784f65a8b30b254821969"} Jan 23 11:01:52 crc kubenswrapper[4689]: I0123 11:01:52.428991 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.221982 4689 generic.go:334] "Generic (PLEG): container finished" podID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerID="b18e4a7e2c84f868ca0f737e1ad830418c33af0d56a9935754e1b3ad11908666" exitCode=0 Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.222035 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" event={"ID":"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c","Type":"ContainerDied","Data":"b18e4a7e2c84f868ca0f737e1ad830418c33af0d56a9935754e1b3ad11908666"} Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.224212 4689 generic.go:334] "Generic (PLEG): container finished" podID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerID="03efac7e2bc276bf08ba71fa064b615298e04451c1a9f2a7de7a0507f54d0a17" exitCode=0 Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.224291 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerDied","Data":"03efac7e2bc276bf08ba71fa064b615298e04451c1a9f2a7de7a0507f54d0a17"} Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.224315 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerStarted","Data":"223459cc22fc8060976b617d949a3f5f231edbf5a058bdc76a50624314512cb4"} Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.518572 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.554548 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util\") pod \"007f3542-55a2-495b-a618-6933e425c7c3\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.555006 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle\") pod \"007f3542-55a2-495b-a618-6933e425c7c3\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.555098 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4b6f\" (UniqueName: \"kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f\") pod \"007f3542-55a2-495b-a618-6933e425c7c3\" (UID: \"007f3542-55a2-495b-a618-6933e425c7c3\") " Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.556253 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle" (OuterVolumeSpecName: "bundle") pod "007f3542-55a2-495b-a618-6933e425c7c3" (UID: "007f3542-55a2-495b-a618-6933e425c7c3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.566377 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f" (OuterVolumeSpecName: "kube-api-access-g4b6f") pod "007f3542-55a2-495b-a618-6933e425c7c3" (UID: "007f3542-55a2-495b-a618-6933e425c7c3"). InnerVolumeSpecName "kube-api-access-g4b6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.569792 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util" (OuterVolumeSpecName: "util") pod "007f3542-55a2-495b-a618-6933e425c7c3" (UID: "007f3542-55a2-495b-a618-6933e425c7c3"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.656780 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.656811 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4b6f\" (UniqueName: \"kubernetes.io/projected/007f3542-55a2-495b-a618-6933e425c7c3-kube-api-access-g4b6f\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:53 crc kubenswrapper[4689]: I0123 11:01:53.656822 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/007f3542-55a2-495b-a618-6933e425c7c3-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.233802 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" event={"ID":"007f3542-55a2-495b-a618-6933e425c7c3","Type":"ContainerDied","Data":"8914304e20c4d9c09cdce438d93a2f13b6469e4ada86fd6fc1620c9b62751732"} Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.233847 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8914304e20c4d9c09cdce438d93a2f13b6469e4ada86fd6fc1620c9b62751732" Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.233879 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd" Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.235864 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerStarted","Data":"4f86c8d42d585a02adad23648ac927b56ca78016c33f60277ea0b179d89f1153"} Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.240973 4689 generic.go:334] "Generic (PLEG): container finished" podID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerID="ce833b9fd5ae585c9e48ee51702c5728d3e0924651d7f2195b21b5ac53adac2d" exitCode=0 Jan 23 11:01:54 crc kubenswrapper[4689]: I0123 11:01:54.241018 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" event={"ID":"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c","Type":"ContainerDied","Data":"ce833b9fd5ae585c9e48ee51702c5728d3e0924651d7f2195b21b5ac53adac2d"} Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.249480 4689 generic.go:334] "Generic (PLEG): container finished" podID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerID="4f86c8d42d585a02adad23648ac927b56ca78016c33f60277ea0b179d89f1153" exitCode=0 Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.249578 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerDied","Data":"4f86c8d42d585a02adad23648ac927b56ca78016c33f60277ea0b179d89f1153"} Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.478912 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.586505 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2rqs\" (UniqueName: \"kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs\") pod \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.586932 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle\") pod \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.586990 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util\") pod \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\" (UID: \"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c\") " Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.587787 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle" (OuterVolumeSpecName: "bundle") pod "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" (UID: "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.592797 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs" (OuterVolumeSpecName: "kube-api-access-m2rqs") pod "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" (UID: "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c"). InnerVolumeSpecName "kube-api-access-m2rqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.600615 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util" (OuterVolumeSpecName: "util") pod "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" (UID: "a6b7cf7d-3b83-4abb-8ca1-9b623c85917c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.688434 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2rqs\" (UniqueName: \"kubernetes.io/projected/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-kube-api-access-m2rqs\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.688696 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:55 crc kubenswrapper[4689]: I0123 11:01:55.688823 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6b7cf7d-3b83-4abb-8ca1-9b623c85917c-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:01:56 crc kubenswrapper[4689]: I0123 11:01:56.256416 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" event={"ID":"a6b7cf7d-3b83-4abb-8ca1-9b623c85917c","Type":"ContainerDied","Data":"7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b"} Jan 23 11:01:56 crc kubenswrapper[4689]: I0123 11:01:56.256637 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c6252459814e78216e8fd3d5512b1f9dc9159463396189046f785105ca8d15b" Jan 23 11:01:56 crc kubenswrapper[4689]: I0123 11:01:56.256451 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp" Jan 23 11:01:56 crc kubenswrapper[4689]: I0123 11:01:56.259081 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerStarted","Data":"8f9a50b51e2f93489a850837b83047b78b671826dcdbad3ef02c43b522ee8dd2"} Jan 23 11:01:56 crc kubenswrapper[4689]: I0123 11:01:56.276776 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sjmnf" podStartSLOduration=2.843014434 podStartE2EDuration="5.276757799s" podCreationTimestamp="2026-01-23 11:01:51 +0000 UTC" firstStartedPulling="2026-01-23 11:01:53.226561508 +0000 UTC m=+777.851241367" lastFinishedPulling="2026-01-23 11:01:55.660304873 +0000 UTC m=+780.284984732" observedRunningTime="2026-01-23 11:01:56.272961621 +0000 UTC m=+780.897641480" watchObservedRunningTime="2026-01-23 11:01:56.276757799 +0000 UTC m=+780.901437658" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.542722 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2"] Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.542952 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.542964 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.542978 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.542984 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" 
containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.542996 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="util" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543003 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="util" Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.543017 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="util" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543023 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="util" Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.543030 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="pull" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543035 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="pull" Jan 23 11:02:01 crc kubenswrapper[4689]: E0123 11:02:01.543043 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="pull" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543049 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="pull" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543171 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b7cf7d-3b83-4abb-8ca1-9b623c85917c" containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543183 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="007f3542-55a2-495b-a618-6933e425c7c3" containerName="extract" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.543788 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.545302 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.545854 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.546551 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.546996 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.547180 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.547855 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-pfllv" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.557550 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2"] Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.667775 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.667872 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-apiservice-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.667920 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmh98\" (UniqueName: \"kubernetes.io/projected/6cbb7c9e-32cf-4368-8983-96d4006dcd58-kube-api-access-cmh98\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.667963 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/6cbb7c9e-32cf-4368-8983-96d4006dcd58-manager-config\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.668106 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" 
(UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-webhook-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.768967 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.769028 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-apiservice-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.769850 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmh98\" (UniqueName: \"kubernetes.io/projected/6cbb7c9e-32cf-4368-8983-96d4006dcd58-kube-api-access-cmh98\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.769950 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/6cbb7c9e-32cf-4368-8983-96d4006dcd58-manager-config\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.770034 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-webhook-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.770907 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/6cbb7c9e-32cf-4368-8983-96d4006dcd58-manager-config\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.774361 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.774372 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-apiservice-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.774559 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6cbb7c9e-32cf-4368-8983-96d4006dcd58-webhook-cert\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.795816 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmh98\" (UniqueName: \"kubernetes.io/projected/6cbb7c9e-32cf-4368-8983-96d4006dcd58-kube-api-access-cmh98\") pod \"loki-operator-controller-manager-775d8c8b9c-rkqj2\" (UID: \"6cbb7c9e-32cf-4368-8983-96d4006dcd58\") " pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:01 crc kubenswrapper[4689]: I0123 11:02:01.859119 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:02 crc kubenswrapper[4689]: I0123 11:02:02.190307 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:02 crc kubenswrapper[4689]: I0123 11:02:02.190776 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:02 crc kubenswrapper[4689]: I0123 11:02:02.237909 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:02 crc kubenswrapper[4689]: I0123 11:02:02.335198 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2"] Jan 23 11:02:02 crc kubenswrapper[4689]: I0123 11:02:02.348809 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:03 crc kubenswrapper[4689]: I0123 11:02:03.301650 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" event={"ID":"6cbb7c9e-32cf-4368-8983-96d4006dcd58","Type":"ContainerStarted","Data":"6f0788fa2367c06fe731cd9ba364dbe3d65f90c1bc739fb406768b7414625690"} Jan 23 11:02:05 crc kubenswrapper[4689]: I0123 11:02:05.856078 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:02:05 crc kubenswrapper[4689]: I0123 11:02:05.856301 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sjmnf" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="registry-server" containerID="cri-o://8f9a50b51e2f93489a850837b83047b78b671826dcdbad3ef02c43b522ee8dd2" gracePeriod=2 Jan 23 11:02:06 crc kubenswrapper[4689]: I0123 11:02:06.325591 4689 generic.go:334] "Generic (PLEG): container finished" podID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerID="8f9a50b51e2f93489a850837b83047b78b671826dcdbad3ef02c43b522ee8dd2" exitCode=0 Jan 
23 11:02:06 crc kubenswrapper[4689]: I0123 11:02:06.325635 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerDied","Data":"8f9a50b51e2f93489a850837b83047b78b671826dcdbad3ef02c43b522ee8dd2"} Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.030700 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.148223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content\") pod \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.148329 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities\") pod \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.148433 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv6sp\" (UniqueName: \"kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp\") pod \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\" (UID: \"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c\") " Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.150669 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities" (OuterVolumeSpecName: "utilities") pod "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" (UID: "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.159608 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp" (OuterVolumeSpecName: "kube-api-access-jv6sp") pod "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" (UID: "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c"). InnerVolumeSpecName "kube-api-access-jv6sp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.250563 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.250595 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv6sp\" (UniqueName: \"kubernetes.io/projected/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-kube-api-access-jv6sp\") on node \"crc\" DevicePath \"\"" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.267361 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" (UID: "4803d54a-7f74-4a02-8f1f-6313eb5d2b5c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.335859 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjmnf" event={"ID":"4803d54a-7f74-4a02-8f1f-6313eb5d2b5c","Type":"ContainerDied","Data":"223459cc22fc8060976b617d949a3f5f231edbf5a058bdc76a50624314512cb4"} Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.335916 4689 scope.go:117] "RemoveContainer" containerID="8f9a50b51e2f93489a850837b83047b78b671826dcdbad3ef02c43b522ee8dd2" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.335955 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sjmnf" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.352080 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.362771 4689 scope.go:117] "RemoveContainer" containerID="4f86c8d42d585a02adad23648ac927b56ca78016c33f60277ea0b179d89f1153" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.365613 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.370722 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sjmnf"] Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.388726 4689 scope.go:117] "RemoveContainer" containerID="03efac7e2bc276bf08ba71fa064b615298e04451c1a9f2a7de7a0507f54d0a17" Jan 23 11:02:07 crc kubenswrapper[4689]: I0123 11:02:07.654510 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" path="/var/lib/kubelet/pods/4803d54a-7f74-4a02-8f1f-6313eb5d2b5c/volumes" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.197869 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv"] Jan 23 11:02:08 crc kubenswrapper[4689]: E0123 11:02:08.198136 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="extract-content" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.198177 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="extract-content" Jan 23 11:02:08 crc kubenswrapper[4689]: E0123 11:02:08.198199 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="extract-utilities" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.198208 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="extract-utilities" Jan 23 11:02:08 crc kubenswrapper[4689]: E0123 11:02:08.198231 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="registry-server" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.198240 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="registry-server" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.198379 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="4803d54a-7f74-4a02-8f1f-6313eb5d2b5c" containerName="registry-server" Jan 23 
11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.198866 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.203636 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.203976 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.204136 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-kp8c7" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.209463 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv"] Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.267960 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmzpq\" (UniqueName: \"kubernetes.io/projected/4d4b9938-cbd8-4628-bd9e-c402b2cb3828-kube-api-access-mmzpq\") pod \"cluster-logging-operator-79cf69ddc8-n77sv\" (UID: \"4d4b9938-cbd8-4628-bd9e-c402b2cb3828\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.370855 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmzpq\" (UniqueName: \"kubernetes.io/projected/4d4b9938-cbd8-4628-bd9e-c402b2cb3828-kube-api-access-mmzpq\") pod \"cluster-logging-operator-79cf69ddc8-n77sv\" (UID: \"4d4b9938-cbd8-4628-bd9e-c402b2cb3828\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.403573 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmzpq\" (UniqueName: \"kubernetes.io/projected/4d4b9938-cbd8-4628-bd9e-c402b2cb3828-kube-api-access-mmzpq\") pod \"cluster-logging-operator-79cf69ddc8-n77sv\" (UID: \"4d4b9938-cbd8-4628-bd9e-c402b2cb3828\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" Jan 23 11:02:08 crc kubenswrapper[4689]: I0123 11:02:08.514336 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" Jan 23 11:02:09 crc kubenswrapper[4689]: I0123 11:02:09.926320 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv"] Jan 23 11:02:10 crc kubenswrapper[4689]: I0123 11:02:10.362936 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" event={"ID":"4d4b9938-cbd8-4628-bd9e-c402b2cb3828","Type":"ContainerStarted","Data":"70561d24c2cb0dfe494beb5d2957befacb2be6937a2aead42d241d540461d93e"} Jan 23 11:02:10 crc kubenswrapper[4689]: I0123 11:02:10.366195 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" event={"ID":"6cbb7c9e-32cf-4368-8983-96d4006dcd58","Type":"ContainerStarted","Data":"8ffcdc1a70527eced45a0ee10a6a530fcfcaa83963d63c4dae3652dbc9274813"} Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.459676 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" event={"ID":"4d4b9938-cbd8-4628-bd9e-c402b2cb3828","Type":"ContainerStarted","Data":"4bc443ccfd0e4f06baa118fb95eb6ee166b9e0b292d2a870cbf7879f5b63d333"} Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.462975 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" event={"ID":"6cbb7c9e-32cf-4368-8983-96d4006dcd58","Type":"ContainerStarted","Data":"01652aa388db634d40edffb1d921d6d4a149eb7b5692ceca2006b4a5d8fb6c7b"} Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.463199 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.470883 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.509286 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-n77sv" podStartSLOduration=2.530661815 podStartE2EDuration="12.5092615s" podCreationTimestamp="2026-01-23 11:02:08 +0000 UTC" firstStartedPulling="2026-01-23 11:02:09.936421038 +0000 UTC m=+794.561100897" lastFinishedPulling="2026-01-23 11:02:19.915020713 +0000 UTC m=+804.539700582" observedRunningTime="2026-01-23 11:02:20.491696401 +0000 UTC m=+805.116376270" watchObservedRunningTime="2026-01-23 11:02:20.5092615 +0000 UTC m=+805.133941349" Jan 23 11:02:20 crc kubenswrapper[4689]: I0123 11:02:20.516641 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podStartSLOduration=1.9653023699999999 podStartE2EDuration="19.516624228s" podCreationTimestamp="2026-01-23 11:02:01 +0000 UTC" firstStartedPulling="2026-01-23 11:02:02.342006209 +0000 UTC m=+786.966686068" lastFinishedPulling="2026-01-23 11:02:19.893328057 +0000 UTC m=+804.518007926" observedRunningTime="2026-01-23 11:02:20.513248452 +0000 UTC m=+805.137928321" watchObservedRunningTime="2026-01-23 11:02:20.516624228 +0000 UTC m=+805.141304087" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.893887 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Jan 23 11:02:26 crc 
kubenswrapper[4689]: I0123 11:02:26.895421 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.898059 4689 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-p6hhb" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.898101 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.898293 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.902658 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.962733 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio" Jan 23 11:02:26 crc kubenswrapper[4689]: I0123 11:02:26.963184 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flb2c\" (UniqueName: \"kubernetes.io/projected/34a6efb1-115a-431a-a8a1-2e651990b88f-kube-api-access-flb2c\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio" Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.065065 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio" Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.065134 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flb2c\" (UniqueName: \"kubernetes.io/projected/34a6efb1-115a-431a-a8a1-2e651990b88f-kube-api-access-flb2c\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio" Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.068494 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
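The csi_attacher message above records that the kubevirt.io.hostpath-provisioner CSI driver does not advertise the STAGE_UNSTAGE_VOLUME node capability, so the kubelet skips the NodeStageVolume ("MountDevice") step and goes straight to NodePublishVolume ("SetUp"); the "MountVolume.MountDevice succeeded" entry that follows is therefore trivial and only records the computed global mount path. A minimal Go sketch of the PVC-backed pod this sequence is mounting, using k8s.io/api types; the claim name "minio-pvc" and the image are assumptions, since the log only shows the bound volume pvc-4858ea42-35ac-474e-9b91-70b718ed91e2:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        pod := &corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "minio", Namespace: "minio-dev"},
            Spec: corev1.PodSpec{
                Containers: []corev1.Container{{
                    Name:  "minio",
                    Image: "quay.io/minio/minio", // assumption: the image is not recorded in this log
                    VolumeMounts: []corev1.VolumeMount{{Name: "data", MountPath: "/data"}},
                }},
                Volumes: []corev1.Volume{{
                    Name: "data",
                    VolumeSource: corev1.VolumeSource{
                        PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "minio-pvc", // hypothetical: the PVC name does not appear in the log
                        },
                    },
                }},
            },
        }
        // The kubelet resolves the claim to the bound PV and produces the
        // VerifyControllerAttachedVolume / MountVolume entries seen above.
        fmt.Println(pod.Name)
    }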
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.068551 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9bc32803ac6dc478d5e4e506e08e02f987e65f0e18bfb7c4a8b7894f99bd940b/globalmount\"" pod="minio-dev/minio"
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.105052 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flb2c\" (UniqueName: \"kubernetes.io/projected/34a6efb1-115a-431a-a8a1-2e651990b88f-kube-api-access-flb2c\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio"
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.108089 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4858ea42-35ac-474e-9b91-70b718ed91e2\") pod \"minio\" (UID: \"34a6efb1-115a-431a-a8a1-2e651990b88f\") " pod="minio-dev/minio"
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.233117 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.467958 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Jan 23 11:02:27 crc kubenswrapper[4689]: I0123 11:02:27.511669 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"34a6efb1-115a-431a-a8a1-2e651990b88f","Type":"ContainerStarted","Data":"acb4386261e5f33e10a54702cc1245adf0d53b697462d7b20a613a696a9c200f"}
Jan 23 11:02:31 crc kubenswrapper[4689]: I0123 11:02:31.543703 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"34a6efb1-115a-431a-a8a1-2e651990b88f","Type":"ContainerStarted","Data":"aafb0ac0fb056104ac79799ee0b05e4e90bb773db1e943eb2b8a8d778d3135a4"}
Jan 23 11:02:31 crc kubenswrapper[4689]: I0123 11:02:31.567572 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.675533473 podStartE2EDuration="8.567556324s" podCreationTimestamp="2026-01-23 11:02:23 +0000 UTC" firstStartedPulling="2026-01-23 11:02:27.490827615 +0000 UTC m=+812.115507464" lastFinishedPulling="2026-01-23 11:02:30.382850456 +0000 UTC m=+815.007530315" observedRunningTime="2026-01-23 11:02:31.564469615 +0000 UTC m=+816.189149474" watchObservedRunningTime="2026-01-23 11:02:31.567556324 +0000 UTC m=+816.192236183"
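The pod_startup_latency_tracker entry above reports two figures: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (11:02:31.567556324 - 11:02:23 = 8.567556324s), while podStartSLOduration appears to be that span with the image-pull window excluded, which the printed monotonic (m=+) offsets confirm. A sketch of the arithmetic under that reading, reproducing the logged values:

    package main

    import "fmt"

    func main() {
        const (
            e2e          = 8.567556324   // watchObservedRunningTime - podCreationTimestamp, seconds
            pullStart    = 812.115507464 // firstStartedPulling, monotonic offset m=+ (s)
            pullFinished = 815.007530315 // lastFinishedPulling, monotonic offset m=+ (s)
        )
        slo := e2e - (pullFinished - pullStart) // subtract the 2.892022851s pull window
        fmt.Printf("podStartSLOduration=%.9f\n", slo) // 5.675533473, matching the log
    }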
Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.179059 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.179228 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-dw7s6" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.179685 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.180384 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.180982 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.214556 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"] Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322095 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rgsmc"] Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322796 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j77w2\" (UniqueName: \"kubernetes.io/projected/72abaa76-42ea-4987-8f23-f4aba4f669e2-kube-api-access-j77w2\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322849 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322962 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.322981 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-config\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: 
\"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.323165 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.324948 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.325860 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.326140 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.336634 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rgsmc"] Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.399134 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"] Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.400416 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.403882 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.404556 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424459 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fqp7\" (UniqueName: \"kubernetes.io/projected/8297556c-bbae-4eb0-b3da-b09a005c90f6-kube-api-access-8fqp7\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424538 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424591 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424614 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-config\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 
11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424639 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-config\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424662 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424690 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j77w2\" (UniqueName: \"kubernetes.io/projected/72abaa76-42ea-4987-8f23-f4aba4f669e2-kube-api-access-j77w2\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424719 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424735 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424752 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.424772 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.426247 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"]
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.426353 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.426907 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72abaa76-42ea-4987-8f23-f4aba4f669e2-config\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.433885 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.439816 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/72abaa76-42ea-4987-8f23-f4aba4f669e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.448924 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j77w2\" (UniqueName: \"kubernetes.io/projected/72abaa76-42ea-4987-8f23-f4aba4f669e2-kube-api-access-j77w2\") pod \"logging-loki-distributor-5f678c8dd6-vllhz\" (UID: \"72abaa76-42ea-4987-8f23-f4aba4f669e2\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.517189 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-m77fj"]
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.518468 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.522135 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.522392 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.522555 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.522714 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-h62ff"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.522923 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.523072 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.524285 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525321 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525350 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525379 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fqp7\" (UniqueName: \"kubernetes.io/projected/8297556c-bbae-4eb0-b3da-b09a005c90f6-kube-api-access-8fqp7\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525408 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-config\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525447 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525477 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525500 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525556 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-config\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525591 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7f4g\" (UniqueName: \"kubernetes.io/projected/faf0752a-d119-41d3-913f-6377a601e8ca-kube-api-access-j7f4g\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.525619 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.527460 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.528548 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8297556c-bbae-4eb0-b3da-b09a005c90f6-config\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.535051 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.536851 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.538787 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/8297556c-bbae-4eb0-b3da-b09a005c90f6-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.547283 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-6nhsx"]
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.548446 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.558487 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fqp7\" (UniqueName: \"kubernetes.io/projected/8297556c-bbae-4eb0-b3da-b09a005c90f6-kube-api-access-8fqp7\") pod \"logging-loki-querier-76788598db-rgsmc\" (UID: \"8297556c-bbae-4eb0-b3da-b09a005c90f6\") " pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.562570 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-m77fj"]
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.583756 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-6nhsx"]
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627271 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627321 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627358 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627377 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627392 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627409 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627435 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627463 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9ww8\" (UniqueName: \"kubernetes.io/projected/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-kube-api-access-z9ww8\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627488 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627507 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7f4g\" (UniqueName: \"kubernetes.io/projected/faf0752a-d119-41d3-913f-6377a601e8ca-kube-api-access-j7f4g\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627537 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tenants\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627555 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-rbac\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.627587 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-config\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.628566 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-config\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.629263 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.634238 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.637469 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/faf0752a-d119-41d3-913f-6377a601e8ca-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.641011 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.653892 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7f4g\" (UniqueName: \"kubernetes.io/projected/faf0752a-d119-41d3-913f-6377a601e8ca-kube-api-access-j7f4g\") pod \"logging-loki-query-frontend-69d9546745-6sm7h\" (UID: \"faf0752a-d119-41d3-913f-6377a601e8ca\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.723461 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729218 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729278 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729307 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729332 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729356 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729379 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729402 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729419 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729441 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9ww8\" (UniqueName: \"kubernetes.io/projected/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-kube-api-access-z9ww8\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729464 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729486 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-rbac\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729517 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tenants\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729533 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-rbac\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729553 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tenants\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729576 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.729603 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq2kj\" (UniqueName: \"kubernetes.io/projected/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-kube-api-access-zq2kj\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: E0123 11:02:38.730029 4689 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.730660 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: E0123 11:02:38.731941 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret podName:ee6fa0a6-5ac3-4202-9280-8babe4cb29a0 nodeName:}" failed. No retries permitted until 2026-01-23 11:02:39.230083614 +0000 UTC m=+823.854763553 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret") pod "logging-loki-gateway-74447864d7-m77fj" (UID: "ee6fa0a6-5ac3-4202-9280-8babe4cb29a0") : secret "logging-loki-gateway-http" not found
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.732934 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-rbac\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.734830 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.737875 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.738894 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.739603 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tenants\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.748972 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9ww8\" (UniqueName: \"kubernetes.io/projected/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-kube-api-access-z9ww8\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831385 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831444 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831482 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831507 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831551 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-rbac\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831606 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tenants\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831640 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.831674 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq2kj\" (UniqueName: \"kubernetes.io/projected/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-kube-api-access-zq2kj\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.832949 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-lokistack-gateway\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: E0123 11:02:38.833478 4689 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found
Jan 23 11:02:38 crc kubenswrapper[4689]: E0123 11:02:38.833557 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret podName:fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2 nodeName:}" failed. No retries permitted until 2026-01-23 11:02:39.333536704 +0000 UTC m=+823.958216653 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret") pod "logging-loki-gateway-74447864d7-6nhsx" (UID: "fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2") : secret "logging-loki-gateway-http" not found
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.837768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-rbac\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.838012 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.838201 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.839314 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.845534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tenants\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:38 crc kubenswrapper[4689]: I0123 11:02:38.848295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq2kj\" (UniqueName: \"kubernetes.io/projected/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-kube-api-access-zq2kj\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.015284 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.108698 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rgsmc"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.189133 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h"]
Jan 23 11:02:39 crc kubenswrapper[4689]: W0123 11:02:39.195045 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfaf0752a_d119_41d3_913f_6377a601e8ca.slice/crio-ee109c2457719a86de70ad5af504be9a6099a80964b7a9b49bda81a8be737b59 WatchSource:0}: Error finding container ee109c2457719a86de70ad5af504be9a6099a80964b7a9b49bda81a8be737b59: Status 404 returned error can't find the container with id ee109c2457719a86de70ad5af504be9a6099a80964b7a9b49bda81a8be737b59
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.238010 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.242884 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ee6fa0a6-5ac3-4202-9280-8babe4cb29a0-tls-secret\") pod \"logging-loki-gateway-74447864d7-m77fj\" (UID: \"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0\") " pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.337449 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.338494 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.339284 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.341545 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.342483 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2-tls-secret\") pod \"logging-loki-gateway-74447864d7-6nhsx\" (UID: \"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2\") " pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.342648 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.371720 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.392023 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.392905 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.396311 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.396695 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.410245 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440503 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f611e01d-7405-4805-a57b-beaa051c46c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f611e01d-7405-4805-a57b-beaa051c46c7\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440556 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440594 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440615 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440655 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440693 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8cm8\" (UniqueName: \"kubernetes.io/projected/cb56bf0f-badb-490a-be0a-2ef41c9a2459-kube-api-access-h8cm8\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440739 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.440772 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-config\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.463431 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.464533 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.466219 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.466524 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.477423 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.516700 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.524122 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542089 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542190 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czphj\" (UniqueName: \"kubernetes.io/projected/79a8bb59-41ce-4777-90af-ded6dfe2e080-kube-api-access-czphj\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542231 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542262 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542286 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542316 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542342 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542370 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8cm8\" (UniqueName: \"kubernetes.io/projected/cb56bf0f-badb-490a-be0a-2ef41c9a2459-kube-api-access-h8cm8\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542404 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542429 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-config\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542452 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542490 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542525 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f611e01d-7405-4805-a57b-beaa051c46c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f611e01d-7405-4805-a57b-beaa051c46c7\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542560 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542587 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542605 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542630 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcjrt\" (UniqueName: \"kubernetes.io/projected/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-kube-api-access-qcjrt\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542649 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-config\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542670 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542697 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542733 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.542751 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.543709 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.544534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb56bf0f-badb-490a-be0a-2ef41c9a2459-config\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.548413 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.548972 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.549337 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.549382 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f611e01d-7405-4805-a57b-beaa051c46c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f611e01d-7405-4805-a57b-beaa051c46c7\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/672999b0c578c0eef0108b6b609d73e1b85bf496f0cbdc67928c7de9b1d5519d/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.549915 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.549945 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2af8b24508b77483c6f90c01e1f67ebf639552d3e7a0f5f25183d25280481c22/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.550649 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/cb56bf0f-badb-490a-be0a-2ef41c9a2459-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.559431 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8cm8\" (UniqueName: \"kubernetes.io/projected/cb56bf0f-badb-490a-be0a-2ef41c9a2459-kube-api-access-h8cm8\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.589194 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f611e01d-7405-4805-a57b-beaa051c46c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f611e01d-7405-4805-a57b-beaa051c46c7\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.592790 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6ff06a4-9173-42d6-8817-e6c640459d89\") pod \"logging-loki-ingester-0\" (UID: \"cb56bf0f-badb-490a-be0a-2ef41c9a2459\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.613433 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" event={"ID":"faf0752a-d119-41d3-913f-6377a601e8ca","Type":"ContainerStarted","Data":"ee109c2457719a86de70ad5af504be9a6099a80964b7a9b49bda81a8be737b59"}
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.620233 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" event={"ID":"8297556c-bbae-4eb0-b3da-b09a005c90f6","Type":"ContainerStarted","Data":"ab4b79fc3d7c8fc0f75238770552566a703a146d10d1e46b2ec50716049f3b0f"}
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.623211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" event={"ID":"72abaa76-42ea-4987-8f23-f4aba4f669e2","Type":"ContainerStarted","Data":"cfbd1a133d045278fd569fdf37831e60352bb76edab92737e36697ca0277b8f9"}
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.643766 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.643824 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644234 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644316 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czphj\" (UniqueName: \"kubernetes.io/projected/79a8bb59-41ce-4777-90af-ded6dfe2e080-kube-api-access-czphj\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644359 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644436 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644466 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644504 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644545 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644571 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644595 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644618 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcjrt\" (UniqueName: \"kubernetes.io/projected/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-kube-api-access-qcjrt\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.644641 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-config\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.645041 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 
11:02:39.645755 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-config\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.645768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.646432 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.647176 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.649309 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.653080 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.655131 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.660006 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.660554 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/79a8bb59-41ce-4777-90af-ded6dfe2e080-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.663666 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcjrt\" (UniqueName: \"kubernetes.io/projected/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-kube-api-access-qcjrt\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.665227 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a86f4ae1-8a8c-4178-a905-c03ad33a2eca-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.669304 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czphj\" (UniqueName: \"kubernetes.io/projected/79a8bb59-41ce-4777-90af-ded6dfe2e080-kube-api-access-czphj\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.675066 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.675098 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/74a254adfd28159d6554039dbe49bc61049f9f8d2fea6b33f7bdd7a3b5986d77/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.677469 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.677518 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5b3bcdbaad4fbcfc3f0f2a3491d8f758f12ebcad761c7f777a984ea46fb147e9/globalmount\"" pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.708884 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06124cee-0e8a-4730-8c09-09f8548b57ef\") pod \"logging-loki-compactor-0\" (UID: \"79a8bb59-41ce-4777-90af-ded6dfe2e080\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.711363 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-07e55a36-b2a9-4801-bca9-2649a5aa636a\") pod \"logging-loki-index-gateway-0\" (UID: \"a86f4ae1-8a8c-4178-a905-c03ad33a2eca\") " pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.723130 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.789426 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Jan 23 11:02:39 crc kubenswrapper[4689]: I0123 11:02:39.983654 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-m77fj"]
Jan 23 11:02:40 crc kubenswrapper[4689]: W0123 11:02:40.001262 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee6fa0a6_5ac3_4202_9280_8babe4cb29a0.slice/crio-5c739b22687e7e21b9b312341af0c9d41ad84c8c2629417843c83eb4e7cf02b6 WatchSource:0}: Error finding container 5c739b22687e7e21b9b312341af0c9d41ad84c8c2629417843c83eb4e7cf02b6: Status 404 returned error can't find the container with id 5c739b22687e7e21b9b312341af0c9d41ad84c8c2629417843c83eb4e7cf02b6
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.077814 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-74447864d7-6nhsx"]
Jan 23 11:02:40 crc kubenswrapper[4689]: W0123 11:02:40.081359 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc08c5e9_b208_45b6_bd9e_7cc821d7bbe2.slice/crio-ba9069210c84fd71a9464f96d36cbc5aae25549fb12aa7cc60fde9c834e49dea WatchSource:0}: Error finding container ba9069210c84fd71a9464f96d36cbc5aae25549fb12aa7cc60fde9c834e49dea: Status 404 returned error can't find the container with id ba9069210c84fd71a9464f96d36cbc5aae25549fb12aa7cc60fde9c834e49dea
Jan 23 11:02:40 crc kubenswrapper[4689]: W0123 11:02:40.178932 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79a8bb59_41ce_4777_90af_ded6dfe2e080.slice/crio-8ecd2fa2dcdc72b18dd88722fa4d16f6840ae9e4c7e83dee72e6e7914b6e8e05 WatchSource:0}: Error finding container 8ecd2fa2dcdc72b18dd88722fa4d16f6840ae9e4c7e83dee72e6e7914b6e8e05: Status 404 returned error can't find the container with id 8ecd2fa2dcdc72b18dd88722fa4d16f6840ae9e4c7e83dee72e6e7914b6e8e05
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.178985 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Jan 23 11:02:40 crc kubenswrapper[4689]: W0123 11:02:40.183884 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb56bf0f_badb_490a_be0a_2ef41c9a2459.slice/crio-80973700224cb0bf9232d08ab3e20eef3f8e35698d997c315cf290325f99e216 WatchSource:0}: Error finding container 80973700224cb0bf9232d08ab3e20eef3f8e35698d997c315cf290325f99e216: Status 404 returned error can't find the container with id 80973700224cb0bf9232d08ab3e20eef3f8e35698d997c315cf290325f99e216
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.184750 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.261049 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Jan 23 11:02:40 crc kubenswrapper[4689]: W0123 11:02:40.261468 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda86f4ae1_8a8c_4178_a905_c03ad33a2eca.slice/crio-a9d4dbe031914f6fecf6ebcc11be8d5f5c90037e1955d7d0b99797e101266689 WatchSource:0}: Error finding container a9d4dbe031914f6fecf6ebcc11be8d5f5c90037e1955d7d0b99797e101266689: Status 404 returned error can't find the container with id a9d4dbe031914f6fecf6ebcc11be8d5f5c90037e1955d7d0b99797e101266689
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.639432 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" event={"ID":"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0","Type":"ContainerStarted","Data":"5c739b22687e7e21b9b312341af0c9d41ad84c8c2629417843c83eb4e7cf02b6"}
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.640575 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" event={"ID":"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2","Type":"ContainerStarted","Data":"ba9069210c84fd71a9464f96d36cbc5aae25549fb12aa7cc60fde9c834e49dea"}
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.641897 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"a86f4ae1-8a8c-4178-a905-c03ad33a2eca","Type":"ContainerStarted","Data":"a9d4dbe031914f6fecf6ebcc11be8d5f5c90037e1955d7d0b99797e101266689"}
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.643441 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"79a8bb59-41ce-4777-90af-ded6dfe2e080","Type":"ContainerStarted","Data":"8ecd2fa2dcdc72b18dd88722fa4d16f6840ae9e4c7e83dee72e6e7914b6e8e05"}
Jan 23 11:02:40 crc kubenswrapper[4689]: I0123 11:02:40.644680 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"cb56bf0f-badb-490a-be0a-2ef41c9a2459","Type":"ContainerStarted","Data":"80973700224cb0bf9232d08ab3e20eef3f8e35698d997c315cf290325f99e216"}
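Every record in this log shares the klog header kubelet emits after the journald prefix: a severity letter (I/W/E), an MMDD date, a wall-clock time, the PID, and the source file:line, followed by the structured message. A small stdlib-only Go sketch (a hypothetical helper, not part of kubelet) that splits these fields out of one record:

```go
package main

import (
	"fmt"
	"regexp"
)

// klogRe matches kubelet's klog header: severity letter, MMDD date,
// time with sub-second precision, PID, source file:line, then message.
var klogRe = regexp.MustCompile(
	`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+)\s+([\w.]+:\d+)\] (.*)`)

func main() {
	line := `I0123 11:02:40.178985 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]`
	m := klogRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s src=%s\nmsg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}
```

The W-severity "Failed to process watch event ... Status 404" records above are benign races of this kind: cAdvisor sees the cgroup appear before CRI-O has registered the container, so the lookup by ID briefly returns 404.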
event={"ID":"cb56bf0f-badb-490a-be0a-2ef41c9a2459","Type":"ContainerStarted","Data":"80973700224cb0bf9232d08ab3e20eef3f8e35698d997c315cf290325f99e216"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.672623 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"a86f4ae1-8a8c-4178-a905-c03ad33a2eca","Type":"ContainerStarted","Data":"86cd2eab4b80be913e9d57a91e36529f53152d2da5d9823cda5865da6addf5bd"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.673269 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.674761 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"79a8bb59-41ce-4777-90af-ded6dfe2e080","Type":"ContainerStarted","Data":"235350d4f2f9450423bc28e8bdcf44fa59d5765c30efbd6585fcd8634e94854a"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.674840 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.675830 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"cb56bf0f-badb-490a-be0a-2ef41c9a2459","Type":"ContainerStarted","Data":"006156e317a7856761a6d369c937a70d8e5535446702afa71775ff44e306e96b"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.676341 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.677696 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" event={"ID":"faf0752a-d119-41d3-913f-6377a601e8ca","Type":"ContainerStarted","Data":"071c5aec7f52e9f27d7f11056f980d4a6d13346e453b5677dbf5a408219df2de"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.677822 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.679117 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" event={"ID":"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0","Type":"ContainerStarted","Data":"5f3bbcd6d84b905281764537c85ab8eb9b786d3b4e82b8b6fc09466b4c24428e"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.680512 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" event={"ID":"72abaa76-42ea-4987-8f23-f4aba4f669e2","Type":"ContainerStarted","Data":"0ec60d8f174c1d2be8f6ad26efdead8c243380c211090e530f657975aaeba475"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.680638 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.682091 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" event={"ID":"8297556c-bbae-4eb0-b3da-b09a005c90f6","Type":"ContainerStarted","Data":"3b67f11f6f0224f9c06e8142ae59e2402fd98e3c78024ccde2571133f9bcb0b9"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.682189 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.683415 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" event={"ID":"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2","Type":"ContainerStarted","Data":"d45e676eddde643a1dd9f9af2bc411003201fb5918b24ca5be87ea9f1433562c"} Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.695729 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=2.820740732 podStartE2EDuration="5.695712425s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:40.263708557 +0000 UTC m=+824.888388436" lastFinishedPulling="2026-01-23 11:02:43.13868027 +0000 UTC m=+827.763360129" observedRunningTime="2026-01-23 11:02:43.693773305 +0000 UTC m=+828.318453174" watchObservedRunningTime="2026-01-23 11:02:43.695712425 +0000 UTC m=+828.320392284" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.713717 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podStartSLOduration=1.687183474 podStartE2EDuration="5.713702566s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:39.197299568 +0000 UTC m=+823.821979427" lastFinishedPulling="2026-01-23 11:02:43.22381866 +0000 UTC m=+827.848498519" observedRunningTime="2026-01-23 11:02:43.711085768 +0000 UTC m=+828.335765637" watchObservedRunningTime="2026-01-23 11:02:43.713702566 +0000 UTC m=+828.338382425" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.737066 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=2.700556434 podStartE2EDuration="5.737020813s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:40.183915654 +0000 UTC m=+824.808595513" lastFinishedPulling="2026-01-23 11:02:43.220380033 +0000 UTC m=+827.845059892" observedRunningTime="2026-01-23 11:02:43.731681496 +0000 UTC m=+828.356361355" watchObservedRunningTime="2026-01-23 11:02:43.737020813 +0000 UTC m=+828.361700672" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.756922 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=2.743851443 podStartE2EDuration="5.756900722s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:40.188301066 +0000 UTC m=+824.812980925" lastFinishedPulling="2026-01-23 11:02:43.201350345 +0000 UTC m=+827.826030204" observedRunningTime="2026-01-23 11:02:43.752780966 +0000 UTC m=+828.377460825" watchObservedRunningTime="2026-01-23 11:02:43.756900722 +0000 UTC m=+828.381580581" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.769499 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podStartSLOduration=1.593342411 podStartE2EDuration="5.769466624s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:39.025196461 +0000 UTC m=+823.649876330" lastFinishedPulling="2026-01-23 11:02:43.201320684 +0000 UTC m=+827.826000543" observedRunningTime="2026-01-23 11:02:43.768253143 +0000 UTC m=+828.392933002" watchObservedRunningTime="2026-01-23 11:02:43.769466624 +0000 UTC 
m=+828.394146483" Jan 23 11:02:43 crc kubenswrapper[4689]: I0123 11:02:43.787840 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podStartSLOduration=1.637081811 podStartE2EDuration="5.787824094s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:39.113828631 +0000 UTC m=+823.738508490" lastFinishedPulling="2026-01-23 11:02:43.264570914 +0000 UTC m=+827.889250773" observedRunningTime="2026-01-23 11:02:43.786693685 +0000 UTC m=+828.411373554" watchObservedRunningTime="2026-01-23 11:02:43.787824094 +0000 UTC m=+828.412503953" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.699559 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" event={"ID":"ee6fa0a6-5ac3-4202-9280-8babe4cb29a0","Type":"ContainerStarted","Data":"0f42c6ae2decb4aae7e4de49b2dd4a6af17b60441b20835c0da8f7c07d08df4c"} Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.700042 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.700061 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.701826 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" event={"ID":"fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2","Type":"ContainerStarted","Data":"f872fb24544287f29d00753d2278efdd4437c6e65903cfd154e2669675061735"} Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.702238 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.709729 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.711502 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.712292 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.720595 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podStartSLOduration=2.409891831 podStartE2EDuration="7.720577238s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:40.00410143 +0000 UTC m=+824.628781289" lastFinishedPulling="2026-01-23 11:02:45.314786837 +0000 UTC m=+829.939466696" observedRunningTime="2026-01-23 11:02:45.720515876 +0000 UTC m=+830.345195735" watchObservedRunningTime="2026-01-23 11:02:45.720577238 +0000 UTC m=+830.345257097" Jan 23 11:02:45 crc kubenswrapper[4689]: I0123 11:02:45.743278 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podStartSLOduration=2.506499125 podStartE2EDuration="7.743260299s" podCreationTimestamp="2026-01-23 11:02:38 +0000 UTC" firstStartedPulling="2026-01-23 11:02:40.083525323 +0000 UTC m=+824.708205182" 
lastFinishedPulling="2026-01-23 11:02:45.320286487 +0000 UTC m=+829.944966356" observedRunningTime="2026-01-23 11:02:45.73782428 +0000 UTC m=+830.362504159" watchObservedRunningTime="2026-01-23 11:02:45.743260299 +0000 UTC m=+830.367940158" Jan 23 11:02:46 crc kubenswrapper[4689]: I0123 11:02:46.707260 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" Jan 23 11:02:46 crc kubenswrapper[4689]: I0123 11:02:46.715292 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.632086 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pqn4q"] Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.634337 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.685504 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqn4q"] Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.728068 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.728434 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.728612 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dh2s\" (UniqueName: \"kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.829685 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.830028 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dh2s\" (UniqueName: \"kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.830121 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " 
pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.830280 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.830636 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.849553 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dh2s\" (UniqueName: \"kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s\") pod \"community-operators-pqn4q\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:56 crc kubenswrapper[4689]: I0123 11:02:56.952411 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:02:57 crc kubenswrapper[4689]: I0123 11:02:57.470541 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pqn4q"] Jan 23 11:02:57 crc kubenswrapper[4689]: I0123 11:02:57.789409 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerStarted","Data":"f1af57a17dfa55110169550e129e9b0d939563c7a6cf4e720c340109f6f48ca3"} Jan 23 11:02:58 crc kubenswrapper[4689]: I0123 11:02:58.533489 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 11:02:58 crc kubenswrapper[4689]: I0123 11:02:58.647086 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 11:02:58 crc kubenswrapper[4689]: I0123 11:02:58.731889 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" Jan 23 11:02:59 crc kubenswrapper[4689]: I0123 11:02:59.668869 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Jan 23 11:02:59 crc kubenswrapper[4689]: I0123 11:02:59.669323 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 11:02:59 crc kubenswrapper[4689]: I0123 11:02:59.728536 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Jan 23 11:02:59 crc kubenswrapper[4689]: I0123 11:02:59.798715 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 11:03:00 crc 
Jan 23 11:03:00 crc kubenswrapper[4689]: I0123 11:03:00.817475 4689 generic.go:334] "Generic (PLEG): container finished" podID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerID="aa1b2b346165fb6338c437112ef94afb06b9ea61bf3498bd5c3345e04c8f3985" exitCode=0
Jan 23 11:03:00 crc kubenswrapper[4689]: I0123 11:03:00.817532 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerDied","Data":"aa1b2b346165fb6338c437112ef94afb06b9ea61bf3498bd5c3345e04c8f3985"}
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.705294 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qgt78"]
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.707356 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.717019 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgt78"]
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.826897 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.827187 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.827257 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbk5c\" (UniqueName: \"kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.834348 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerStarted","Data":"c437bb3ed45543cac0bd65ca3ca46eb906484825c38a180372acf6701359ac87"}
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.928468 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbk5c\" (UniqueName: \"kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.928576 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.928619 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.929312 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.930337 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:02 crc kubenswrapper[4689]: I0123 11:03:02.955083 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbk5c\" (UniqueName: \"kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c\") pod \"certified-operators-qgt78\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.028504 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.516777 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgt78"]
Jan 23 11:03:03 crc kubenswrapper[4689]: W0123 11:03:03.518576 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8ac71c7_1da1_4da4_95ab_493717e57798.slice/crio-75404da9cbd9d42ae7e1aa690f73faf448948d83336f87fb9109765d49f69945 WatchSource:0}: Error finding container 75404da9cbd9d42ae7e1aa690f73faf448948d83336f87fb9109765d49f69945: Status 404 returned error can't find the container with id 75404da9cbd9d42ae7e1aa690f73faf448948d83336f87fb9109765d49f69945
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.845426 4689 generic.go:334] "Generic (PLEG): container finished" podID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerID="c437bb3ed45543cac0bd65ca3ca46eb906484825c38a180372acf6701359ac87" exitCode=0
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.845495 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerDied","Data":"c437bb3ed45543cac0bd65ca3ca46eb906484825c38a180372acf6701359ac87"}
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.852635 4689 generic.go:334] "Generic (PLEG): container finished" podID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerID="1c7c093ac44f06c69e3d6999b0b3b8985ff69ced5e34bf4747995824c2e1ae6f" exitCode=0
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.852675 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerDied","Data":"1c7c093ac44f06c69e3d6999b0b3b8985ff69ced5e34bf4747995824c2e1ae6f"}
Jan 23 11:03:03 crc kubenswrapper[4689]: I0123 11:03:03.852730 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerStarted","Data":"75404da9cbd9d42ae7e1aa690f73faf448948d83336f87fb9109765d49f69945"}
Jan 23 11:03:04 crc kubenswrapper[4689]: I0123 11:03:04.861678 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerStarted","Data":"30bc51b87cddbe581f73680564d7219d7bb523674b2d2fcf1fe9e3148960822c"}
Jan 23 11:03:04 crc kubenswrapper[4689]: I0123 11:03:04.864708 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerStarted","Data":"2bb2b41aba1e2dd0ec6a3c23b17f12523fa3aab6c335be3e6ebd9cb3fead4c42"}
Jan 23 11:03:04 crc kubenswrapper[4689]: I0123 11:03:04.909715 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pqn4q" podStartSLOduration=6.486251614 podStartE2EDuration="8.909691335s" podCreationTimestamp="2026-01-23 11:02:56 +0000 UTC" firstStartedPulling="2026-01-23 11:03:01.829524797 +0000 UTC m=+846.454204696" lastFinishedPulling="2026-01-23 11:03:04.252964568 +0000 UTC m=+848.877644417" observedRunningTime="2026-01-23 11:03:04.905965186 +0000 UTC m=+849.530645045" watchObservedRunningTime="2026-01-23 11:03:04.909691335 +0000 UTC m=+849.534371204"
Jan 23 11:03:05 crc kubenswrapper[4689]: I0123 11:03:05.877889 4689 generic.go:334] "Generic (PLEG): container finished" podID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerID="30bc51b87cddbe581f73680564d7219d7bb523674b2d2fcf1fe9e3148960822c" exitCode=0
Jan 23 11:03:05 crc kubenswrapper[4689]: I0123 11:03:05.877938 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerDied","Data":"30bc51b87cddbe581f73680564d7219d7bb523674b2d2fcf1fe9e3148960822c"}
Jan 23 11:03:06 crc kubenswrapper[4689]: I0123 11:03:06.888880 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerStarted","Data":"03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091"}
Jan 23 11:03:06 crc kubenswrapper[4689]: I0123 11:03:06.913164 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qgt78" podStartSLOduration=2.428688793 podStartE2EDuration="4.913127115s" podCreationTimestamp="2026-01-23 11:03:02 +0000 UTC" firstStartedPulling="2026-01-23 11:03:03.855140784 +0000 UTC m=+848.479820643" lastFinishedPulling="2026-01-23 11:03:06.339579106 +0000 UTC m=+850.964258965" observedRunningTime="2026-01-23 11:03:06.908341759 +0000 UTC m=+851.533021628" watchObservedRunningTime="2026-01-23 11:03:06.913127115 +0000 UTC m=+851.537806974"
Jan 23 11:03:06 crc kubenswrapper[4689]: I0123 11:03:06.952667 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pqn4q"
Jan 23 11:03:06 crc kubenswrapper[4689]: I0123 11:03:06.952716 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pqn4q"
Jan 23 11:03:06 crc kubenswrapper[4689]: I0123 11:03:06.988543 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pqn4q"
Jan 23 11:03:09 crc kubenswrapper[4689]: I0123 11:03:09.671179 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Jan 23 11:03:09 crc kubenswrapper[4689]: I0123 11:03:09.671797 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 23 11:03:13 crc kubenswrapper[4689]: I0123 11:03:13.029326 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:13 crc kubenswrapper[4689]: I0123 11:03:13.030092 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:13 crc kubenswrapper[4689]: I0123 11:03:13.073645 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:13 crc kubenswrapper[4689]: I0123 11:03:13.987358 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qgt78"
Jan 23 11:03:14 crc kubenswrapper[4689]: I0123 11:03:14.047358 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qgt78"]
Jan 23 11:03:15 crc kubenswrapper[4689]: I0123 11:03:15.963960 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qgt78" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="registry-server" containerID="cri-o://03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091" gracePeriod=2
Jan 23 11:03:17 crc kubenswrapper[4689]: I0123 11:03:17.018227 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pqn4q"
Jan 23 11:03:17 crc kubenswrapper[4689]: I0123 11:03:17.072374 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqn4q"]
Jan 23 11:03:17 crc kubenswrapper[4689]: I0123 11:03:17.997197 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pqn4q" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="registry-server" containerID="cri-o://2bb2b41aba1e2dd0ec6a3c23b17f12523fa3aab6c335be3e6ebd9cb3fead4c42" gracePeriod=2
Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.008627 4689 generic.go:334] "Generic (PLEG): container finished" podID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerID="2bb2b41aba1e2dd0ec6a3c23b17f12523fa3aab6c335be3e6ebd9cb3fead4c42" exitCode=0
Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.008697 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerDied","Data":"2bb2b41aba1e2dd0ec6a3c23b17f12523fa3aab6c335be3e6ebd9cb3fead4c42"}
Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.011796 4689 generic.go:334] "Generic (PLEG): container finished" podID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerID="03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091" exitCode=0
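"Killing container with a grace period ... gracePeriod=2" means the runtime delivers SIGTERM and waits two seconds before escalating to SIGKILL, so a process that wants the clean exitCode=0 seen above (like registry-server here) must finish shutting down inside that window. A minimal stdlib Go sketch of a process honoring that budget (the 1.5s cleanup bound is an illustrative choice, not from the log):

```go
package main

import (
	"context"
	"log"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// ctx is cancelled when the container runtime delivers SIGTERM.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM)
	defer stop()

	<-ctx.Done()
	// We now have roughly gracePeriod seconds (2 in the log above)
	// before SIGKILL; bound cleanup well inside that budget.
	cleanup, cancel := context.WithTimeout(context.Background(), 1500*time.Millisecond)
	defer cancel()
	// ... flush state, close listeners ...
	<-cleanup.Done()
	log.Println("shut down before the grace period expired")
}
```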
podID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerID="03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091" exitCode=0 Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.011818 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerDied","Data":"03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091"} Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.316427 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qgt78" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.452170 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content\") pod \"a8ac71c7-1da1-4da4-95ab-493717e57798\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.452280 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities\") pod \"a8ac71c7-1da1-4da4-95ab-493717e57798\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.453563 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities" (OuterVolumeSpecName: "utilities") pod "a8ac71c7-1da1-4da4-95ab-493717e57798" (UID: "a8ac71c7-1da1-4da4-95ab-493717e57798"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.453975 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbk5c\" (UniqueName: \"kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c\") pod \"a8ac71c7-1da1-4da4-95ab-493717e57798\" (UID: \"a8ac71c7-1da1-4da4-95ab-493717e57798\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.454352 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.461189 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c" (OuterVolumeSpecName: "kube-api-access-hbk5c") pod "a8ac71c7-1da1-4da4-95ab-493717e57798" (UID: "a8ac71c7-1da1-4da4-95ab-493717e57798"). InnerVolumeSpecName "kube-api-access-hbk5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.496460 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.501220 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a8ac71c7-1da1-4da4-95ab-493717e57798" (UID: "a8ac71c7-1da1-4da4-95ab-493717e57798"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.556068 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbk5c\" (UniqueName: \"kubernetes.io/projected/a8ac71c7-1da1-4da4-95ab-493717e57798-kube-api-access-hbk5c\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.556349 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ac71c7-1da1-4da4-95ab-493717e57798-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.656918 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dh2s\" (UniqueName: \"kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s\") pod \"4efc9b1e-b19b-4413-ba97-9aad796e0109\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.657048 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities\") pod \"4efc9b1e-b19b-4413-ba97-9aad796e0109\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.657088 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content\") pod \"4efc9b1e-b19b-4413-ba97-9aad796e0109\" (UID: \"4efc9b1e-b19b-4413-ba97-9aad796e0109\") " Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.658187 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities" (OuterVolumeSpecName: "utilities") pod "4efc9b1e-b19b-4413-ba97-9aad796e0109" (UID: "4efc9b1e-b19b-4413-ba97-9aad796e0109"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.661111 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s" (OuterVolumeSpecName: "kube-api-access-2dh2s") pod "4efc9b1e-b19b-4413-ba97-9aad796e0109" (UID: "4efc9b1e-b19b-4413-ba97-9aad796e0109"). InnerVolumeSpecName "kube-api-access-2dh2s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.667744 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.667815 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.734732 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4efc9b1e-b19b-4413-ba97-9aad796e0109" (UID: "4efc9b1e-b19b-4413-ba97-9aad796e0109"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.759479 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dh2s\" (UniqueName: \"kubernetes.io/projected/4efc9b1e-b19b-4413-ba97-9aad796e0109-kube-api-access-2dh2s\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.759514 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:19 crc kubenswrapper[4689]: I0123 11:03:19.759628 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4efc9b1e-b19b-4413-ba97-9aad796e0109-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.024295 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pqn4q" event={"ID":"4efc9b1e-b19b-4413-ba97-9aad796e0109","Type":"ContainerDied","Data":"f1af57a17dfa55110169550e129e9b0d939563c7a6cf4e720c340109f6f48ca3"} Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.024362 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pqn4q" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.024380 4689 scope.go:117] "RemoveContainer" containerID="2bb2b41aba1e2dd0ec6a3c23b17f12523fa3aab6c335be3e6ebd9cb3fead4c42" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.039190 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgt78" event={"ID":"a8ac71c7-1da1-4da4-95ab-493717e57798","Type":"ContainerDied","Data":"75404da9cbd9d42ae7e1aa690f73faf448948d83336f87fb9109765d49f69945"} Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.039296 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qgt78" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.074806 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pqn4q"] Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.084567 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pqn4q"] Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.084810 4689 scope.go:117] "RemoveContainer" containerID="c437bb3ed45543cac0bd65ca3ca46eb906484825c38a180372acf6701359ac87" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.094681 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qgt78"] Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.099592 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qgt78"] Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.108906 4689 scope.go:117] "RemoveContainer" containerID="aa1b2b346165fb6338c437112ef94afb06b9ea61bf3498bd5c3345e04c8f3985" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.136840 4689 scope.go:117] "RemoveContainer" containerID="03ad1aec1e07f3b609c27cd653a90c3858d1d1e6feb33f81a190a994d3cd1091" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.162882 4689 scope.go:117] "RemoveContainer" containerID="30bc51b87cddbe581f73680564d7219d7bb523674b2d2fcf1fe9e3148960822c" Jan 23 11:03:20 crc kubenswrapper[4689]: I0123 11:03:20.184328 4689 scope.go:117] "RemoveContainer" containerID="1c7c093ac44f06c69e3d6999b0b3b8985ff69ced5e34bf4747995824c2e1ae6f" Jan 23 11:03:21 crc kubenswrapper[4689]: I0123 11:03:21.656124 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" path="/var/lib/kubelet/pods/4efc9b1e-b19b-4413-ba97-9aad796e0109/volumes" Jan 23 11:03:21 crc kubenswrapper[4689]: I0123 11:03:21.658170 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" path="/var/lib/kubelet/pods/a8ac71c7-1da1-4da4-95ab-493717e57798/volumes" Jan 23 11:03:29 crc kubenswrapper[4689]: I0123 11:03:29.664881 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 23 11:03:29 crc kubenswrapper[4689]: I0123 11:03:29.665537 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 11:03:33 crc kubenswrapper[4689]: I0123 11:03:33.311453 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:03:33 crc kubenswrapper[4689]: I0123 11:03:33.311892 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
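
[editor's note] The probe entries above show both failure modes the kubelet logs: the loki-ingester readiness probe returning HTTP 503 ("Ingester not ready: waiting for 15s after being ready"), and the machine-config-daemon liveness probe failing at the transport layer ("connection refused"). Below is a minimal, self-contained Go sketch — not the kubelet's actual prober code — of how such an HTTP GET probe is classified: status codes from 200 up to (but not including) 400 count as success, while 4xx/5xx responses and transport errors count as failure. The timeout value is an illustrative assumption, not taken from these pods' specs.

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probeHTTP performs one probe attempt and classifies the result the way
    // the kubelet classifies HTTP probes: 2xx/3xx is success; 4xx/5xx is
    // failure; a transport error (e.g. connection refused) is also failure.
    func probeHTTP(url string, timeout time.Duration) (ok bool, detail string) {
    	client := &http.Client{Timeout: timeout}
    	resp, err := client.Get(url)
    	if err != nil {
    		// Matches log output like:
    		// "dial tcp 127.0.0.1:8798: connect: connection refused"
    		return false, err.Error()
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		return true, fmt.Sprintf("HTTP %d", resp.StatusCode)
    	}
    	// Matches log output like: "HTTP probe failed with statuscode: 503"
    	return false, fmt.Sprintf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    }

    func main() {
    	// Probing the liveness endpoint seen in the log; 1s timeout is assumed.
    	ok, detail := probeHTTP("http://127.0.0.1:8798/health", 1*time.Second)
    	fmt.Printf("healthy=%v (%s)\n", ok, detail)
    }

The consequences of the two failures also differ, as the rest of this log shows: the readiness failures only keep logging-loki-ingester-0 out of service until the probe passes (it flips to "ready" at 11:03:39), whereas the repeated liveness failures eventually make the kubelet kill and restart the machine-config-daemon container ("failed liveness probe, will be restarted" at 11:04:33).
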
Jan 23 11:03:39 crc kubenswrapper[4689]: I0123 11:03:39.666454 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.650978 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-v5bxn"] Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.651915 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.651934 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.651955 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.651964 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.651975 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="extract-content" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.651983 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="extract-content" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.651997 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="extract-utilities" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652005 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="extract-utilities" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.652016 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="extract-content" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652024 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="extract-content" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.652041 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="extract-utilities" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652048 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="extract-utilities" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652289 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="4efc9b1e-b19b-4413-ba97-9aad796e0109" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652316 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8ac71c7-1da1-4da4-95ab-493717e57798" containerName="registry-server" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.652896 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.655326 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.655518 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.656305 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.657275 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-2464w" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.657434 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.683829 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-v5bxn"] Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.686927 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756782 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756861 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756892 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756916 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756940 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756964 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " 
pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.756993 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.757030 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.757060 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.757111 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58fwt\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.757182 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.841167 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-v5bxn"] Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.841769 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-58fwt metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-v5bxn" podUID="61d968a6-f51f-454a-97c8-72cc267aa5b2" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858249 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858326 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858367 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: 
\"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858417 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58fwt\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858476 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858523 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858552 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858578 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858603 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858628 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.858652 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.858802 4689 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.858867 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics 
podName:61d968a6-f51f-454a-97c8-72cc267aa5b2 nodeName:}" failed. No retries permitted until 2026-01-23 11:03:57.358846078 +0000 UTC m=+901.983525937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics") pod "collector-v5bxn" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2") : secret "collector-metrics" not found Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.859035 4689 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Jan 23 11:03:56 crc kubenswrapper[4689]: E0123 11:03:56.859063 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver podName:61d968a6-f51f-454a-97c8-72cc267aa5b2 nodeName:}" failed. No retries permitted until 2026-01-23 11:03:57.359054703 +0000 UTC m=+901.983734562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver") pod "collector-v5bxn" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2") : secret "collector-syslog-receiver" not found Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.859464 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.859676 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.859755 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.859776 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.860280 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.866752 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.874474 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.877899 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58fwt\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:56 crc kubenswrapper[4689]: I0123 11:03:56.878286 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.348474 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.358807 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.367852 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.368072 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.373625 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.376800 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") pod \"collector-v5bxn\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " pod="openshift-logging/collector-v5bxn" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.469109 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.469516 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 
11:03:57.469680 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.469901 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.470056 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.470257 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.470600 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.470758 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.470920 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58fwt\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.471080 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.471348 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp\") pod \"61d968a6-f51f-454a-97c8-72cc267aa5b2\" (UID: \"61d968a6-f51f-454a-97c8-72cc267aa5b2\") " Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.469594 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "entrypoint". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.471091 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir" (OuterVolumeSpecName: "datadir") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.471881 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config" (OuterVolumeSpecName: "config") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.473842 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.474557 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token" (OuterVolumeSpecName: "collector-token") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.474567 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics" (OuterVolumeSpecName: "metrics") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.475139 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.475718 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp" (OuterVolumeSpecName: "tmp") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.476107 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token" (OuterVolumeSpecName: "sa-token") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.477453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt" (OuterVolumeSpecName: "kube-api-access-58fwt") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "kube-api-access-58fwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.478785 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "61d968a6-f51f-454a-97c8-72cc267aa5b2" (UID: "61d968a6-f51f-454a-97c8-72cc267aa5b2"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573688 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573729 4689 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573741 4689 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/61d968a6-f51f-454a-97c8-72cc267aa5b2-datadir\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573753 4689 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573792 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573803 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58fwt\" (UniqueName: \"kubernetes.io/projected/61d968a6-f51f-454a-97c8-72cc267aa5b2-kube-api-access-58fwt\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573814 4689 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573823 4689 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/61d968a6-f51f-454a-97c8-72cc267aa5b2-tmp\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573831 4689 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/61d968a6-f51f-454a-97c8-72cc267aa5b2-entrypoint\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573838 4689 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: 
\"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-metrics\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:57 crc kubenswrapper[4689]: I0123 11:03:57.573846 4689 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/61d968a6-f51f-454a-97c8-72cc267aa5b2-collector-token\") on node \"crc\" DevicePath \"\"" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.355956 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-v5bxn" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.402548 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-v5bxn"] Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.417192 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-v5bxn"] Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.425576 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-t2cnk"] Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.426521 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.433747 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.433952 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.434172 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.434390 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-2464w" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.436183 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-t2cnk"] Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.436595 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.437707 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595397 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/388e3d73-e449-4fdc-9ba2-47b55a360c92-datadir\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595448 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96flk\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-kube-api-access-96flk\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595473 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-entrypoint\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " 
pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-syslog-receiver\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595536 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-metrics\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595646 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595663 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/388e3d73-e449-4fdc-9ba2-47b55a360c92-tmp\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595723 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-trusted-ca\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595757 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-sa-token\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595806 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config-openshift-service-cacrt\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.595857 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-token\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697604 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config-openshift-service-cacrt\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 
crc kubenswrapper[4689]: I0123 11:03:58.697701 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-token\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697753 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/388e3d73-e449-4fdc-9ba2-47b55a360c92-datadir\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697788 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96flk\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-kube-api-access-96flk\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697818 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-entrypoint\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697845 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-syslog-receiver\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697876 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-metrics\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697923 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/388e3d73-e449-4fdc-9ba2-47b55a360c92-tmp\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.697944 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.698025 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-trusted-ca\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.698091 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-sa-token\") pod \"collector-t2cnk\" (UID: 
\"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.698624 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config-openshift-service-cacrt\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.699421 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-entrypoint\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.699435 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-config\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.699928 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/388e3d73-e449-4fdc-9ba2-47b55a360c92-datadir\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.700082 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/388e3d73-e449-4fdc-9ba2-47b55a360c92-trusted-ca\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.701839 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-metrics\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.701952 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-syslog-receiver\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.702165 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/388e3d73-e449-4fdc-9ba2-47b55a360c92-collector-token\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.703228 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/388e3d73-e449-4fdc-9ba2-47b55a360c92-tmp\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.716735 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-sa-token\") 
pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.719649 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96flk\" (UniqueName: \"kubernetes.io/projected/388e3d73-e449-4fdc-9ba2-47b55a360c92-kube-api-access-96flk\") pod \"collector-t2cnk\" (UID: \"388e3d73-e449-4fdc-9ba2-47b55a360c92\") " pod="openshift-logging/collector-t2cnk" Jan 23 11:03:58 crc kubenswrapper[4689]: I0123 11:03:58.744629 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t2cnk" Jan 23 11:03:59 crc kubenswrapper[4689]: I0123 11:03:59.158364 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-t2cnk"] Jan 23 11:03:59 crc kubenswrapper[4689]: I0123 11:03:59.367256 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-t2cnk" event={"ID":"388e3d73-e449-4fdc-9ba2-47b55a360c92","Type":"ContainerStarted","Data":"44459686d055d98e67035f2b76f70b875f51c4d9e57e62cd87206a9fe180aedd"} Jan 23 11:03:59 crc kubenswrapper[4689]: I0123 11:03:59.650687 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61d968a6-f51f-454a-97c8-72cc267aa5b2" path="/var/lib/kubelet/pods/61d968a6-f51f-454a-97c8-72cc267aa5b2/volumes" Jan 23 11:04:03 crc kubenswrapper[4689]: I0123 11:04:03.310717 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:04:03 crc kubenswrapper[4689]: I0123 11:04:03.311002 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:04:11 crc kubenswrapper[4689]: I0123 11:04:11.452773 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-t2cnk" event={"ID":"388e3d73-e449-4fdc-9ba2-47b55a360c92","Type":"ContainerStarted","Data":"d20ba102c84aa7ddeec1d3471555bd019ddd0c2e30d9062d60d9408729ebf83a"} Jan 23 11:04:11 crc kubenswrapper[4689]: I0123 11:04:11.475349 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-t2cnk" podStartSLOduration=2.329091905 podStartE2EDuration="13.47532586s" podCreationTimestamp="2026-01-23 11:03:58 +0000 UTC" firstStartedPulling="2026-01-23 11:03:59.160788352 +0000 UTC m=+903.785468211" lastFinishedPulling="2026-01-23 11:04:10.307022317 +0000 UTC m=+914.931702166" observedRunningTime="2026-01-23 11:04:11.47207637 +0000 UTC m=+916.096756229" watchObservedRunningTime="2026-01-23 11:04:11.47532586 +0000 UTC m=+916.100005719" Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.311592 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.312314 4689 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.312365 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.313131 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.313205 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d" gracePeriod=600 Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.637603 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d" exitCode=0 Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.637695 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d"} Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.637937 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81"} Jan 23 11:04:33 crc kubenswrapper[4689]: I0123 11:04:33.637959 4689 scope.go:117] "RemoveContainer" containerID="6c7330462c260e571fdbe25c842509e5da85ef7832a1583f72e3e82ae187dabf" Jan 23 11:04:38 crc kubenswrapper[4689]: I0123 11:04:38.861110 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd"] Jan 23 11:04:38 crc kubenswrapper[4689]: I0123 11:04:38.862894 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:38 crc kubenswrapper[4689]: I0123 11:04:38.865022 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 11:04:38 crc kubenswrapper[4689]: I0123 11:04:38.882467 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd"] Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.026431 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttkpn\" (UniqueName: \"kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.026488 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.026513 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.127663 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttkpn\" (UniqueName: \"kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.127735 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.127756 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.128174 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.128702 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.159546 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttkpn\" (UniqueName: \"kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.180675 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.617714 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd"] Jan 23 11:04:39 crc kubenswrapper[4689]: I0123 11:04:39.689336 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerStarted","Data":"eb49b194fd2cda27370db2c5067e7b3b1bfcb98818b32c559498157efe0956b7"} Jan 23 11:04:41 crc kubenswrapper[4689]: I0123 11:04:41.707066 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerStarted","Data":"9b82f06f09f10587e5138459408a4e53a71eab7765288c59b9956613fd9633ba"} Jan 23 11:04:43 crc kubenswrapper[4689]: I0123 11:04:43.723223 4689 generic.go:334] "Generic (PLEG): container finished" podID="755f37c8-04c1-462c-bcde-fec84986a51a" containerID="9b82f06f09f10587e5138459408a4e53a71eab7765288c59b9956613fd9633ba" exitCode=0 Jan 23 11:04:43 crc kubenswrapper[4689]: I0123 11:04:43.723289 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerDied","Data":"9b82f06f09f10587e5138459408a4e53a71eab7765288c59b9956613fd9633ba"} Jan 23 11:04:52 crc kubenswrapper[4689]: I0123 11:04:52.788019 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerStarted","Data":"5bba3fb226cc3db3018f75f94f7cb7c8d045fc419f567ff73743ed2d86fbc7ef"} Jan 23 11:04:53 crc kubenswrapper[4689]: I0123 11:04:53.795810 4689 generic.go:334] "Generic (PLEG): container finished" podID="755f37c8-04c1-462c-bcde-fec84986a51a" 
containerID="5bba3fb226cc3db3018f75f94f7cb7c8d045fc419f567ff73743ed2d86fbc7ef" exitCode=0 Jan 23 11:04:53 crc kubenswrapper[4689]: I0123 11:04:53.795858 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerDied","Data":"5bba3fb226cc3db3018f75f94f7cb7c8d045fc419f567ff73743ed2d86fbc7ef"} Jan 23 11:04:54 crc kubenswrapper[4689]: I0123 11:04:54.805364 4689 generic.go:334] "Generic (PLEG): container finished" podID="755f37c8-04c1-462c-bcde-fec84986a51a" containerID="a1cb9f05deaab5054fb4f43d7e16d37ec5f1c4b9d23839ce307f4d2354178553" exitCode=0 Jan 23 11:04:54 crc kubenswrapper[4689]: I0123 11:04:54.805505 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerDied","Data":"a1cb9f05deaab5054fb4f43d7e16d37ec5f1c4b9d23839ce307f4d2354178553"} Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.077757 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.122398 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle\") pod \"755f37c8-04c1-462c-bcde-fec84986a51a\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.123056 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util\") pod \"755f37c8-04c1-462c-bcde-fec84986a51a\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.123667 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle" (OuterVolumeSpecName: "bundle") pod "755f37c8-04c1-462c-bcde-fec84986a51a" (UID: "755f37c8-04c1-462c-bcde-fec84986a51a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.123976 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttkpn\" (UniqueName: \"kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn\") pod \"755f37c8-04c1-462c-bcde-fec84986a51a\" (UID: \"755f37c8-04c1-462c-bcde-fec84986a51a\") " Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.125548 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.136079 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util" (OuterVolumeSpecName: "util") pod "755f37c8-04c1-462c-bcde-fec84986a51a" (UID: "755f37c8-04c1-462c-bcde-fec84986a51a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.137568 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn" (OuterVolumeSpecName: "kube-api-access-ttkpn") pod "755f37c8-04c1-462c-bcde-fec84986a51a" (UID: "755f37c8-04c1-462c-bcde-fec84986a51a"). InnerVolumeSpecName "kube-api-access-ttkpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.227573 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/755f37c8-04c1-462c-bcde-fec84986a51a-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.227646 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttkpn\" (UniqueName: \"kubernetes.io/projected/755f37c8-04c1-462c-bcde-fec84986a51a-kube-api-access-ttkpn\") on node \"crc\" DevicePath \"\"" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.820750 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" event={"ID":"755f37c8-04c1-462c-bcde-fec84986a51a","Type":"ContainerDied","Data":"eb49b194fd2cda27370db2c5067e7b3b1bfcb98818b32c559498157efe0956b7"} Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.821081 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb49b194fd2cda27370db2c5067e7b3b1bfcb98818b32c559498157efe0956b7" Jan 23 11:04:56 crc kubenswrapper[4689]: I0123 11:04:56.820834 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.898460 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-84f2p"] Jan 23 11:05:00 crc kubenswrapper[4689]: E0123 11:05:00.899324 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="extract" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.899342 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="extract" Jan 23 11:05:00 crc kubenswrapper[4689]: E0123 11:05:00.899354 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="util" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.899361 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="util" Jan 23 11:05:00 crc kubenswrapper[4689]: E0123 11:05:00.899372 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="pull" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.899379 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="pull" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.899544 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="755f37c8-04c1-462c-bcde-fec84986a51a" containerName="extract" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.900166 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.902267 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-gzdhf" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.903557 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.904055 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 23 11:05:00 crc kubenswrapper[4689]: I0123 11:05:00.911403 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-84f2p"] Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.092348 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk59v\" (UniqueName: \"kubernetes.io/projected/5d52f3fd-b543-410b-acc6-348a9c684ee2-kube-api-access-jk59v\") pod \"nmstate-operator-646758c888-84f2p\" (UID: \"5d52f3fd-b543-410b-acc6-348a9c684ee2\") " pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.193536 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk59v\" (UniqueName: \"kubernetes.io/projected/5d52f3fd-b543-410b-acc6-348a9c684ee2-kube-api-access-jk59v\") pod \"nmstate-operator-646758c888-84f2p\" (UID: \"5d52f3fd-b543-410b-acc6-348a9c684ee2\") " pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.213340 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk59v\" (UniqueName: \"kubernetes.io/projected/5d52f3fd-b543-410b-acc6-348a9c684ee2-kube-api-access-jk59v\") pod \"nmstate-operator-646758c888-84f2p\" (UID: \"5d52f3fd-b543-410b-acc6-348a9c684ee2\") " pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.224216 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.723135 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-84f2p"] Jan 23 11:05:01 crc kubenswrapper[4689]: W0123 11:05:01.727012 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d52f3fd_b543_410b_acc6_348a9c684ee2.slice/crio-36e9276f3338ba29fce8335efcb3930b908cdd3ea6a8e4cd68933dfb14abad4b WatchSource:0}: Error finding container 36e9276f3338ba29fce8335efcb3930b908cdd3ea6a8e4cd68933dfb14abad4b: Status 404 returned error can't find the container with id 36e9276f3338ba29fce8335efcb3930b908cdd3ea6a8e4cd68933dfb14abad4b Jan 23 11:05:01 crc kubenswrapper[4689]: I0123 11:05:01.852256 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" event={"ID":"5d52f3fd-b543-410b-acc6-348a9c684ee2","Type":"ContainerStarted","Data":"36e9276f3338ba29fce8335efcb3930b908cdd3ea6a8e4cd68933dfb14abad4b"} Jan 23 11:05:05 crc kubenswrapper[4689]: I0123 11:05:05.882548 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" event={"ID":"5d52f3fd-b543-410b-acc6-348a9c684ee2","Type":"ContainerStarted","Data":"3b54dd726ef16c744f578049d3907650a33cc37588d85e8ea9c7a61d7276b719"} Jan 23 11:05:05 crc kubenswrapper[4689]: I0123 11:05:05.898165 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-84f2p" podStartSLOduration=2.886919897 podStartE2EDuration="5.898124742s" podCreationTimestamp="2026-01-23 11:05:00 +0000 UTC" firstStartedPulling="2026-01-23 11:05:01.729421677 +0000 UTC m=+966.354101536" lastFinishedPulling="2026-01-23 11:05:04.740626522 +0000 UTC m=+969.365306381" observedRunningTime="2026-01-23 11:05:05.895397306 +0000 UTC m=+970.520077165" watchObservedRunningTime="2026-01-23 11:05:05.898124742 +0000 UTC m=+970.522804601" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.179469 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7xhq4"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.181071 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.184287 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-zgw4l" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.186823 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.187729 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.189390 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.192386 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7xhq4"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.206861 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.217257 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-z5sn9"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.218451 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.256932 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28j2f\" (UniqueName: \"kubernetes.io/projected/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-kube-api-access-28j2f\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.256984 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rnhq\" (UniqueName: \"kubernetes.io/projected/cdc8775f-209a-4342-83fb-78612d37b22b-kube-api-access-6rnhq\") pod \"nmstate-metrics-54757c584b-7xhq4\" (UID: \"cdc8775f-209a-4342-83fb-78612d37b22b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.257019 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-dbus-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.257199 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czl9c\" (UniqueName: \"kubernetes.io/projected/90c7af03-d2b6-45ef-b228-d5621bf1f671-kube-api-access-czl9c\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.257285 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.257317 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-ovs-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.257349 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-nmstate-lock\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.349473 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.350551 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358315 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-dbus-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358382 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358426 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czl9c\" (UniqueName: \"kubernetes.io/projected/90c7af03-d2b6-45ef-b228-d5621bf1f671-kube-api-access-czl9c\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358459 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358479 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-ovs-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358503 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-nmstate-lock\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358526 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358547 
4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28j2f\" (UniqueName: \"kubernetes.io/projected/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-kube-api-access-28j2f\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358563 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rnhq\" (UniqueName: \"kubernetes.io/projected/cdc8775f-209a-4342-83fb-78612d37b22b-kube-api-access-6rnhq\") pod \"nmstate-metrics-54757c584b-7xhq4\" (UID: \"cdc8775f-209a-4342-83fb-78612d37b22b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.358579 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmtjp\" (UniqueName: \"kubernetes.io/projected/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-kube-api-access-wmtjp\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.359073 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-dbus-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: E0123 11:05:10.359412 4689 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 23 11:05:10 crc kubenswrapper[4689]: E0123 11:05:10.359449 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair podName:83cfd8ec-2928-4cd8-a14c-330cce17bfd5 nodeName:}" failed. No retries permitted until 2026-01-23 11:05:10.859436013 +0000 UTC m=+975.484115872 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-2r5kt" (UID: "83cfd8ec-2928-4cd8-a14c-330cce17bfd5") : secret "openshift-nmstate-webhook" not found Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.359576 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-ovs-socket\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.359606 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/90c7af03-d2b6-45ef-b228-d5621bf1f671-nmstate-lock\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.359985 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.360106 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.360303 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-48fjm" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.370623 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.394397 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rnhq\" (UniqueName: \"kubernetes.io/projected/cdc8775f-209a-4342-83fb-78612d37b22b-kube-api-access-6rnhq\") pod \"nmstate-metrics-54757c584b-7xhq4\" (UID: \"cdc8775f-209a-4342-83fb-78612d37b22b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.398616 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czl9c\" (UniqueName: \"kubernetes.io/projected/90c7af03-d2b6-45ef-b228-d5621bf1f671-kube-api-access-czl9c\") pod \"nmstate-handler-z5sn9\" (UID: \"90c7af03-d2b6-45ef-b228-d5621bf1f671\") " pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.400274 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28j2f\" (UniqueName: \"kubernetes.io/projected/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-kube-api-access-28j2f\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.459364 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.459412 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmtjp\" (UniqueName: 
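The nestedpendingoperations error above shows how a failed mount is retried: the operation is parked and no retry is permitted until a backoff deadline, here starting at 500ms after the failure (the tls-key-pair mount does succeed later, at 11:05:10.875904, once the secret exists). The Go sketch below prints such a schedule; the 500ms start matches the log, while the doubling factor and the cap are assumptions about the kubelet's exponential backoff rather than values taken from this log.

// backoff_sketch.go - sketch of the retry schedule behind
// "durationBeforeRetry 500ms". Initial delay is from the log; the
// 2x factor and the ~2m cap are assumptions.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond          // matches durationBeforeRetry in the log
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d: no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}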
\"kubernetes.io/projected/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-kube-api-access-wmtjp\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.459465 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: E0123 11:05:10.459590 4689 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 23 11:05:10 crc kubenswrapper[4689]: E0123 11:05:10.459633 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert podName:f2a5d877-b98d-41b6-8686-f55b2ef8b34b nodeName:}" failed. No retries permitted until 2026-01-23 11:05:10.959620116 +0000 UTC m=+975.584299975 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-7hw66" (UID: "f2a5d877-b98d-41b6-8686-f55b2ef8b34b") : secret "plugin-serving-cert" not found Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.460734 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.486810 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmtjp\" (UniqueName: \"kubernetes.io/projected/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-kube-api-access-wmtjp\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.506952 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.538627 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.544588 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.545460 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.574862 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667655 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667758 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667793 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667868 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667888 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667927 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvbd8\" (UniqueName: \"kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.667948 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.769793 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc 
kubenswrapper[4689]: I0123 11:05:10.770086 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770185 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770217 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770247 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvbd8\" (UniqueName: \"kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770281 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770325 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.770577 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.771139 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.771167 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 
11:05:10.771310 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.776525 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.776545 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.786562 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvbd8\" (UniqueName: \"kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8\") pod \"console-7dddff5c78-d9qgh\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.871812 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.875904 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/83cfd8ec-2928-4cd8-a14c-330cce17bfd5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-2r5kt\" (UID: \"83cfd8ec-2928-4cd8-a14c-330cce17bfd5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.911969 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.923032 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-z5sn9" event={"ID":"90c7af03-d2b6-45ef-b228-d5621bf1f671","Type":"ContainerStarted","Data":"44ad982a173fb1aa1bce5f62f98b87fe41f2355443b4fd6630fcb27a166a3553"} Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.972902 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:10 crc kubenswrapper[4689]: I0123 11:05:10.976964 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a5d877-b98d-41b6-8686-f55b2ef8b34b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-7hw66\" (UID: \"f2a5d877-b98d-41b6-8686-f55b2ef8b34b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:11 crc kubenswrapper[4689]: W0123 11:05:11.017215 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcdc8775f_209a_4342_83fb_78612d37b22b.slice/crio-f25222634c5e9388742e6bf3c9fd0007994f6bc7f0c85f032a2613bcfe48d04d WatchSource:0}: Error finding container f25222634c5e9388742e6bf3c9fd0007994f6bc7f0c85f032a2613bcfe48d04d: Status 404 returned error can't find the container with id f25222634c5e9388742e6bf3c9fd0007994f6bc7f0c85f032a2613bcfe48d04d Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.018313 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-7xhq4"] Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.116808 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.277481 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.415009 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.434636 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt"] Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.790118 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66"] Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.958041 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" event={"ID":"cdc8775f-209a-4342-83fb-78612d37b22b","Type":"ContainerStarted","Data":"f25222634c5e9388742e6bf3c9fd0007994f6bc7f0c85f032a2613bcfe48d04d"} Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.959614 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dddff5c78-d9qgh" event={"ID":"7525f02f-3c52-4e75-bace-0d3e1bedeee8","Type":"ContainerStarted","Data":"1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d"} Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.959676 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dddff5c78-d9qgh" event={"ID":"7525f02f-3c52-4e75-bace-0d3e1bedeee8","Type":"ContainerStarted","Data":"8930d72675bd847372ab585da2567cfc55b301bfbda38d8c141834eb0faffd65"} Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.961988 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" event={"ID":"f2a5d877-b98d-41b6-8686-f55b2ef8b34b","Type":"ContainerStarted","Data":"6ed8ddf52ecd5229302fa890f440793b6d8d08318b89c2a9c05a4ed74c484faf"} Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.972994 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" event={"ID":"83cfd8ec-2928-4cd8-a14c-330cce17bfd5","Type":"ContainerStarted","Data":"2b4cb42136bdec3ca94132370d9dea0d10312a432b9fac7cf95e2ba38c3c7414"} Jan 23 11:05:11 crc kubenswrapper[4689]: I0123 11:05:11.991202 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7dddff5c78-d9qgh" podStartSLOduration=1.9911866379999998 podStartE2EDuration="1.991186638s" podCreationTimestamp="2026-01-23 11:05:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:05:11.983707393 +0000 UTC m=+976.608387242" watchObservedRunningTime="2026-01-23 11:05:11.991186638 +0000 UTC m=+976.615866497" Jan 23 11:05:13 crc kubenswrapper[4689]: I0123 11:05:13.989080 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-z5sn9" event={"ID":"90c7af03-d2b6-45ef-b228-d5621bf1f671","Type":"ContainerStarted","Data":"f93425f16939da68c76c65d3b9b206fa12e70b717681b46f252988f322104798"} Jan 23 11:05:13 crc kubenswrapper[4689]: I0123 11:05:13.989668 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:13 crc kubenswrapper[4689]: I0123 11:05:13.991878 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" 
event={"ID":"cdc8775f-209a-4342-83fb-78612d37b22b","Type":"ContainerStarted","Data":"e01c030f675483566da8d1ed6e62884276ec2a0be74343b2145f1d16484f6069"} Jan 23 11:05:13 crc kubenswrapper[4689]: I0123 11:05:13.993577 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" event={"ID":"83cfd8ec-2928-4cd8-a14c-330cce17bfd5","Type":"ContainerStarted","Data":"b52561c1599d96afe18b93817c638bba49699c0034ece7d609f3276c6cb007c5"} Jan 23 11:05:13 crc kubenswrapper[4689]: I0123 11:05:13.993844 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:14 crc kubenswrapper[4689]: I0123 11:05:14.007527 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-z5sn9" podStartSLOduration=1.5125775240000001 podStartE2EDuration="4.007508107s" podCreationTimestamp="2026-01-23 11:05:10 +0000 UTC" firstStartedPulling="2026-01-23 11:05:10.611200381 +0000 UTC m=+975.235880240" lastFinishedPulling="2026-01-23 11:05:13.106130964 +0000 UTC m=+977.730810823" observedRunningTime="2026-01-23 11:05:14.004250051 +0000 UTC m=+978.628929910" watchObservedRunningTime="2026-01-23 11:05:14.007508107 +0000 UTC m=+978.632187976" Jan 23 11:05:14 crc kubenswrapper[4689]: I0123 11:05:14.024577 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podStartSLOduration=2.386543074 podStartE2EDuration="4.024554065s" podCreationTimestamp="2026-01-23 11:05:10 +0000 UTC" firstStartedPulling="2026-01-23 11:05:11.469551407 +0000 UTC m=+976.094231256" lastFinishedPulling="2026-01-23 11:05:13.107562388 +0000 UTC m=+977.732242247" observedRunningTime="2026-01-23 11:05:14.022464377 +0000 UTC m=+978.647144236" watchObservedRunningTime="2026-01-23 11:05:14.024554065 +0000 UTC m=+978.649233924" Jan 23 11:05:16 crc kubenswrapper[4689]: I0123 11:05:16.056524 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" event={"ID":"f2a5d877-b98d-41b6-8686-f55b2ef8b34b","Type":"ContainerStarted","Data":"33e45b19494bdb0054aaaf64a76871deae0c6332452ec99332adb205b5f65ba0"} Jan 23 11:05:16 crc kubenswrapper[4689]: I0123 11:05:16.078432 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-7hw66" podStartSLOduration=3.014298126 podStartE2EDuration="6.078411592s" podCreationTimestamp="2026-01-23 11:05:10 +0000 UTC" firstStartedPulling="2026-01-23 11:05:11.800870256 +0000 UTC m=+976.425550115" lastFinishedPulling="2026-01-23 11:05:14.864983722 +0000 UTC m=+979.489663581" observedRunningTime="2026-01-23 11:05:16.076439877 +0000 UTC m=+980.701119736" watchObservedRunningTime="2026-01-23 11:05:16.078411592 +0000 UTC m=+980.703091451" Jan 23 11:05:17 crc kubenswrapper[4689]: I0123 11:05:17.065674 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" event={"ID":"cdc8775f-209a-4342-83fb-78612d37b22b","Type":"ContainerStarted","Data":"a162f8c22d61d7c13f15005d170d34bb2d13565524894714dd2d6c1323d3c079"} Jan 23 11:05:17 crc kubenswrapper[4689]: I0123 11:05:17.084300 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-7xhq4" podStartSLOduration=1.4470277 podStartE2EDuration="7.084278788s" podCreationTimestamp="2026-01-23 11:05:10 +0000 UTC" 
firstStartedPulling="2026-01-23 11:05:11.019603253 +0000 UTC m=+975.644283112" lastFinishedPulling="2026-01-23 11:05:16.656854321 +0000 UTC m=+981.281534200" observedRunningTime="2026-01-23 11:05:17.083660904 +0000 UTC m=+981.708340753" watchObservedRunningTime="2026-01-23 11:05:17.084278788 +0000 UTC m=+981.708958647" Jan 23 11:05:20 crc kubenswrapper[4689]: I0123 11:05:20.563163 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 11:05:20 crc kubenswrapper[4689]: I0123 11:05:20.912819 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:20 crc kubenswrapper[4689]: I0123 11:05:20.913183 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:20 crc kubenswrapper[4689]: I0123 11:05:20.921612 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:21 crc kubenswrapper[4689]: I0123 11:05:21.101452 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:05:21 crc kubenswrapper[4689]: I0123 11:05:21.166389 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 11:05:31 crc kubenswrapper[4689]: I0123 11:05:31.135010 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 11:05:46 crc kubenswrapper[4689]: I0123 11:05:46.224269 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-6cb59b46c-trq59" podUID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" containerName="console" containerID="cri-o://2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d" gracePeriod=15 Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.136925 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-6cb59b46c-trq59_75f5477a-8c1c-4d7d-b6b5-373a337d3642/console/0.log" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.138003 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.160113 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.160538 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.160652 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.161762 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.161539 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.161691 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config" (OuterVolumeSpecName: "console-config") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.162309 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqlzd\" (UniqueName: \"kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.162486 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.162619 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca\") pod \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\" (UID: \"75f5477a-8c1c-4d7d-b6b5-373a337d3642\") " Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163338 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca" (OuterVolumeSpecName: "service-ca") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163460 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163786 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163806 4689 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163816 4689 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.163826 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75f5477a-8c1c-4d7d-b6b5-373a337d3642-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.198034 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.204846 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd" (OuterVolumeSpecName: "kube-api-access-bqlzd") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "kube-api-access-bqlzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.204856 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "75f5477a-8c1c-4d7d-b6b5-373a337d3642" (UID: "75f5477a-8c1c-4d7d-b6b5-373a337d3642"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.266363 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqlzd\" (UniqueName: \"kubernetes.io/projected/75f5477a-8c1c-4d7d-b6b5-373a337d3642-kube-api-access-bqlzd\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.266405 4689 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.266414 4689 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75f5477a-8c1c-4d7d-b6b5-373a337d3642-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361067 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-6cb59b46c-trq59_75f5477a-8c1c-4d7d-b6b5-373a337d3642/console/0.log" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361117 4689 generic.go:334] "Generic (PLEG): container finished" podID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" containerID="2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d" exitCode=2 Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361151 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6cb59b46c-trq59" event={"ID":"75f5477a-8c1c-4d7d-b6b5-373a337d3642","Type":"ContainerDied","Data":"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d"} Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361196 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6cb59b46c-trq59" event={"ID":"75f5477a-8c1c-4d7d-b6b5-373a337d3642","Type":"ContainerDied","Data":"da0306cc0f0f259088ef981636b4faf2dd6638d9382065a03b42da4b95f1450b"} Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361197 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6cb59b46c-trq59" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.361209 4689 scope.go:117] "RemoveContainer" containerID="2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.398271 4689 scope.go:117] "RemoveContainer" containerID="2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d" Jan 23 11:05:47 crc kubenswrapper[4689]: E0123 11:05:47.398998 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d\": container with ID starting with 2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d not found: ID does not exist" containerID="2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.399049 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d"} err="failed to get container status \"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d\": rpc error: code = NotFound desc = could not find container \"2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d\": container with ID starting with 2020ad779c148514785b734c3c883ba045fbabfe2220e77ebb8f530d3f2d876d not found: ID does not exist" Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.406346 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.413180 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-6cb59b46c-trq59"] Jan 23 11:05:47 crc kubenswrapper[4689]: I0123 11:05:47.659009 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" path="/var/lib/kubelet/pods/75f5477a-8c1c-4d7d-b6b5-373a337d3642/volumes" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.515089 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb"] Jan 23 11:05:51 crc kubenswrapper[4689]: E0123 11:05:51.516065 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" containerName="console" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.516085 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" containerName="console" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.516340 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="75f5477a-8c1c-4d7d-b6b5-373a337d3642" containerName="console" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.517601 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.520745 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.524664 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb"] Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.637886 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-567hh\" (UniqueName: \"kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.637978 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.638014 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.740425 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.740516 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.740663 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-567hh\" (UniqueName: \"kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.741041 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.741072 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.759993 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-567hh\" (UniqueName: \"kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:51 crc kubenswrapper[4689]: I0123 11:05:51.849389 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:52 crc kubenswrapper[4689]: I0123 11:05:52.300793 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb"] Jan 23 11:05:52 crc kubenswrapper[4689]: I0123 11:05:52.398530 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" event={"ID":"c41acced-4000-4e9d-ade1-8fbc9f93e303","Type":"ContainerStarted","Data":"400fedcfe7c91145f84a301e54860d349360c31ce03f93446ce2b68020012732"} Jan 23 11:05:53 crc kubenswrapper[4689]: I0123 11:05:53.406900 4689 generic.go:334] "Generic (PLEG): container finished" podID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerID="7ca32c1f5aad9b72eef7c17dae92825173dcbd1670bcd887273d95e40bbb9123" exitCode=0 Jan 23 11:05:53 crc kubenswrapper[4689]: I0123 11:05:53.406952 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" event={"ID":"c41acced-4000-4e9d-ade1-8fbc9f93e303","Type":"ContainerDied","Data":"7ca32c1f5aad9b72eef7c17dae92825173dcbd1670bcd887273d95e40bbb9123"} Jan 23 11:05:53 crc kubenswrapper[4689]: I0123 11:05:53.408779 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:05:56 crc kubenswrapper[4689]: I0123 11:05:56.435540 4689 generic.go:334] "Generic (PLEG): container finished" podID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerID="286f85559280fc451e1503cb0fc56fe1d6ec05ea726ac802c8bf276d2a6c8e53" exitCode=0 Jan 23 11:05:56 crc kubenswrapper[4689]: I0123 11:05:56.435607 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" event={"ID":"c41acced-4000-4e9d-ade1-8fbc9f93e303","Type":"ContainerDied","Data":"286f85559280fc451e1503cb0fc56fe1d6ec05ea726ac802c8bf276d2a6c8e53"} Jan 23 11:05:57 crc kubenswrapper[4689]: I0123 11:05:57.445857 4689 generic.go:334] "Generic (PLEG): container finished" 
podID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerID="f34028ab84bdf4b66ea23f72f1e3ca4a8a5c98c156123d65077e462f601ffd11" exitCode=0 Jan 23 11:05:57 crc kubenswrapper[4689]: I0123 11:05:57.445934 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" event={"ID":"c41acced-4000-4e9d-ade1-8fbc9f93e303","Type":"ContainerDied","Data":"f34028ab84bdf4b66ea23f72f1e3ca4a8a5c98c156123d65077e462f601ffd11"} Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.765572 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.857127 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util\") pod \"c41acced-4000-4e9d-ade1-8fbc9f93e303\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.857186 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-567hh\" (UniqueName: \"kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh\") pod \"c41acced-4000-4e9d-ade1-8fbc9f93e303\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.857209 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle\") pod \"c41acced-4000-4e9d-ade1-8fbc9f93e303\" (UID: \"c41acced-4000-4e9d-ade1-8fbc9f93e303\") " Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.858288 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle" (OuterVolumeSpecName: "bundle") pod "c41acced-4000-4e9d-ade1-8fbc9f93e303" (UID: "c41acced-4000-4e9d-ade1-8fbc9f93e303"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.867899 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util" (OuterVolumeSpecName: "util") pod "c41acced-4000-4e9d-ade1-8fbc9f93e303" (UID: "c41acced-4000-4e9d-ade1-8fbc9f93e303"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.871023 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh" (OuterVolumeSpecName: "kube-api-access-567hh") pod "c41acced-4000-4e9d-ade1-8fbc9f93e303" (UID: "c41acced-4000-4e9d-ade1-8fbc9f93e303"). InnerVolumeSpecName "kube-api-access-567hh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.958875 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.958918 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-567hh\" (UniqueName: \"kubernetes.io/projected/c41acced-4000-4e9d-ade1-8fbc9f93e303-kube-api-access-567hh\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:58 crc kubenswrapper[4689]: I0123 11:05:58.958934 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c41acced-4000-4e9d-ade1-8fbc9f93e303-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:05:59 crc kubenswrapper[4689]: I0123 11:05:59.464106 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" event={"ID":"c41acced-4000-4e9d-ade1-8fbc9f93e303","Type":"ContainerDied","Data":"400fedcfe7c91145f84a301e54860d349360c31ce03f93446ce2b68020012732"} Jan 23 11:05:59 crc kubenswrapper[4689]: I0123 11:05:59.464638 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="400fedcfe7c91145f84a301e54860d349360c31ce03f93446ce2b68020012732" Jan 23 11:05:59 crc kubenswrapper[4689]: I0123 11:05:59.464199 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.261021 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx"] Jan 23 11:06:09 crc kubenswrapper[4689]: E0123 11:06:09.262394 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="pull" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.262414 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="pull" Jan 23 11:06:09 crc kubenswrapper[4689]: E0123 11:06:09.262427 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="extract" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.262433 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="extract" Jan 23 11:06:09 crc kubenswrapper[4689]: E0123 11:06:09.262448 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="util" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.262455 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="util" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.262628 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c41acced-4000-4e9d-ade1-8fbc9f93e303" containerName="extract" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.263698 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.265991 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.268191 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.276255 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.276408 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-pnkfh" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.278939 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.283408 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx"] Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.326090 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-apiservice-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.326215 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7mpf\" (UniqueName: \"kubernetes.io/projected/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-kube-api-access-c7mpf\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.326317 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-webhook-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.427838 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-webhook-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.427987 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-apiservice-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.428132 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7mpf\" (UniqueName: \"kubernetes.io/projected/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-kube-api-access-c7mpf\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.433889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-apiservice-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.443873 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-webhook-cert\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.447549 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7mpf\" (UniqueName: \"kubernetes.io/projected/af5f2d1f-74a0-4ac2-9e78-c81c3815f722-kube-api-access-c7mpf\") pod \"metallb-operator-controller-manager-6b5bd865cb-ppjnx\" (UID: \"af5f2d1f-74a0-4ac2-9e78-c81c3815f722\") " pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.581996 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.612588 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"] Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.613721 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.617590 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.617707 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.618109 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-ncqln" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.633966 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-927px\" (UniqueName: \"kubernetes.io/projected/6087eb3b-66c0-4d14-a5de-008f086a59ee-kube-api-access-927px\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.634090 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-apiservice-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.634131 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-webhook-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.636104 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"] Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.737347 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-apiservice-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.738985 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-webhook-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.739164 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-927px\" (UniqueName: \"kubernetes.io/projected/6087eb3b-66c0-4d14-a5de-008f086a59ee-kube-api-access-927px\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 
11:06:09.743448 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-apiservice-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.748870 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6087eb3b-66c0-4d14-a5de-008f086a59ee-webhook-cert\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.782310 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-927px\" (UniqueName: \"kubernetes.io/projected/6087eb3b-66c0-4d14-a5de-008f086a59ee-kube-api-access-927px\") pod \"metallb-operator-webhook-server-64d6f55f49-snsq8\" (UID: \"6087eb3b-66c0-4d14-a5de-008f086a59ee\") " pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:09 crc kubenswrapper[4689]: I0123 11:06:09.989911 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:10 crc kubenswrapper[4689]: I0123 11:06:10.205120 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx"]
Jan 23 11:06:10 crc kubenswrapper[4689]: W0123 11:06:10.211370 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf5f2d1f_74a0_4ac2_9e78_c81c3815f722.slice/crio-46d68301db268f0cb5b24f9a338c0b53cea0df9fe7f71c1450a7413c343dfc82 WatchSource:0}: Error finding container 46d68301db268f0cb5b24f9a338c0b53cea0df9fe7f71c1450a7413c343dfc82: Status 404 returned error can't find the container with id 46d68301db268f0cb5b24f9a338c0b53cea0df9fe7f71c1450a7413c343dfc82
Jan 23 11:06:10 crc kubenswrapper[4689]: I0123 11:06:10.491813 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"]
Jan 23 11:06:10 crc kubenswrapper[4689]: I0123 11:06:10.550546 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" event={"ID":"6087eb3b-66c0-4d14-a5de-008f086a59ee","Type":"ContainerStarted","Data":"17797d3ac8c64cde4703e08aa50d2fe1b501a9b0a75853f6efb24c92476bafb5"}
Jan 23 11:06:10 crc kubenswrapper[4689]: I0123 11:06:10.552178 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" event={"ID":"af5f2d1f-74a0-4ac2-9e78-c81c3815f722","Type":"ContainerStarted","Data":"46d68301db268f0cb5b24f9a338c0b53cea0df9fe7f71c1450a7413c343dfc82"}
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.603115 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" event={"ID":"6087eb3b-66c0-4d14-a5de-008f086a59ee","Type":"ContainerStarted","Data":"c09b7791ff52d5b3704d52c0a91c3db861e0537267bebe09479b9a050c7febd4"}
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.603730 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.605585 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" event={"ID":"af5f2d1f-74a0-4ac2-9e78-c81c3815f722","Type":"ContainerStarted","Data":"1040e6fbc746f9335cc73efbca7f4ca3966c7a5cc50a703175484ce84927f336"}
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.606001 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx"
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.623767 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podStartSLOduration=2.514871646 podStartE2EDuration="7.623751338s" podCreationTimestamp="2026-01-23 11:06:09 +0000 UTC" firstStartedPulling="2026-01-23 11:06:10.507321261 +0000 UTC m=+1035.132001120" lastFinishedPulling="2026-01-23 11:06:15.616200933 +0000 UTC m=+1040.240880812" observedRunningTime="2026-01-23 11:06:16.619683753 +0000 UTC m=+1041.244363612" watchObservedRunningTime="2026-01-23 11:06:16.623751338 +0000 UTC m=+1041.248431197"
Jan 23 11:06:16 crc kubenswrapper[4689]: I0123 11:06:16.677541 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" podStartSLOduration=2.306623716 podStartE2EDuration="7.677519286s" podCreationTimestamp="2026-01-23 11:06:09 +0000 UTC" firstStartedPulling="2026-01-23 11:06:10.215635159 +0000 UTC m=+1034.840315018" lastFinishedPulling="2026-01-23 11:06:15.586530729 +0000 UTC m=+1040.211210588" observedRunningTime="2026-01-23 11:06:16.672181791 +0000 UTC m=+1041.296861650" watchObservedRunningTime="2026-01-23 11:06:16.677519286 +0000 UTC m=+1041.302199145"
Jan 23 11:06:30 crc kubenswrapper[4689]: I0123 11:06:30.000719 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8"
Jan 23 11:06:33 crc kubenswrapper[4689]: I0123 11:06:33.311524 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:06:33 crc kubenswrapper[4689]: I0123 11:06:33.312341 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:06:49 crc kubenswrapper[4689]: I0123 11:06:49.585554 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.372898 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-pkdqh"]
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.375992 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pkdqh"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.381053 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.381241 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.381477 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-7hv9q"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.384168 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z"]
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.385364 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.387961 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.388908 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z"]
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.451844 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-tqgjs"]
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.459171 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tqgjs"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.461814 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.462181 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.462256 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.462195 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-rfh6q"
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.468411 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-mtbb4"]
Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.470054 4689 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.472506 4689 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.489307 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mtbb4"] Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.512966 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics-certs\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513031 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513056 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-conf\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513090 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-startup\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513161 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44p2n\" (UniqueName: \"kubernetes.io/projected/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-kube-api-access-44p2n\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513198 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69r6q\" (UniqueName: \"kubernetes.io/projected/3e9ac503-1ccb-4008-866b-0e6e5a11227d-kube-api-access-69r6q\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513383 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-reloader\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.513474 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e9ac503-1ccb-4008-866b-0e6e5a11227d-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 
crc kubenswrapper[4689]: I0123 11:06:50.514825 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-sockets\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.615898 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e9ac503-1ccb-4008-866b-0e6e5a11227d-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.615946 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metallb-excludel2\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.615968 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-sockets\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616008 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-cert\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616025 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics-certs\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616047 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616063 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-conf\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616087 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-startup\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616112 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44p2n\" (UniqueName: 
\"kubernetes.io/projected/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-kube-api-access-44p2n\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616131 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69r6q\" (UniqueName: \"kubernetes.io/projected/3e9ac503-1ccb-4008-866b-0e6e5a11227d-kube-api-access-69r6q\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616172 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metrics-certs\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616193 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvqzg\" (UniqueName: \"kubernetes.io/projected/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-kube-api-access-hvqzg\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616224 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46pmg\" (UniqueName: \"kubernetes.io/projected/4ab08845-476b-4601-9385-bbec37b18e35-kube-api-access-46pmg\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616245 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616263 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616288 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-reloader\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616454 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-sockets\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616593 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-reloader\") pod 
\"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.616797 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-conf\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.617325 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.617587 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-frr-startup\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.623753 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-metrics-certs\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.628942 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e9ac503-1ccb-4008-866b-0e6e5a11227d-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.638428 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69r6q\" (UniqueName: \"kubernetes.io/projected/3e9ac503-1ccb-4008-866b-0e6e5a11227d-kube-api-access-69r6q\") pod \"frr-k8s-webhook-server-7df86c4f6c-vcs9z\" (UID: \"3e9ac503-1ccb-4008-866b-0e6e5a11227d\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.638867 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44p2n\" (UniqueName: \"kubernetes.io/projected/2cd07ec1-86a5-45f4-b5a6-edaa4f185c17-kube-api-access-44p2n\") pod \"frr-k8s-pkdqh\" (UID: \"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17\") " pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717731 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-cert\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717834 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metrics-certs\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717866 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-hvqzg\" (UniqueName: \"kubernetes.io/projected/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-kube-api-access-hvqzg\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717908 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46pmg\" (UniqueName: \"kubernetes.io/projected/4ab08845-476b-4601-9385-bbec37b18e35-kube-api-access-46pmg\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717938 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.717964 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.718021 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metallb-excludel2\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: E0123 11:06:50.718673 4689 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 23 11:06:50 crc kubenswrapper[4689]: E0123 11:06:50.718761 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs podName:4ab08845-476b-4601-9385-bbec37b18e35 nodeName:}" failed. No retries permitted until 2026-01-23 11:06:51.218733148 +0000 UTC m=+1075.843413037 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs") pod "controller-6968d8fdc4-mtbb4" (UID: "4ab08845-476b-4601-9385-bbec37b18e35") : secret "controller-certs-secret" not found Jan 23 11:06:50 crc kubenswrapper[4689]: E0123 11:06:50.718771 4689 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.718953 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metallb-excludel2\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: E0123 11:06:50.718959 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist podName:00b72a13-b4c0-43b7-97b9-1e9a1ec55edf nodeName:}" failed. No retries permitted until 2026-01-23 11:06:51.218928843 +0000 UTC m=+1075.843608732 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist") pod "speaker-tqgjs" (UID: "00b72a13-b4c0-43b7-97b9-1e9a1ec55edf") : secret "metallb-memberlist" not found Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.719439 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.720631 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-cert\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.728872 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-metrics-certs\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.729693 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.738187 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46pmg\" (UniqueName: \"kubernetes.io/projected/4ab08845-476b-4601-9385-bbec37b18e35-kube-api-access-46pmg\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:50 crc kubenswrapper[4689]: I0123 11:06:50.744069 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvqzg\" (UniqueName: \"kubernetes.io/projected/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-kube-api-access-hvqzg\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.183624 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z"] Jan 23 11:06:51 crc kubenswrapper[4689]: W0123 11:06:51.183677 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e9ac503_1ccb_4008_866b_0e6e5a11227d.slice/crio-9ed62695d056b9bc47960f88eb1e37b6fd5a9e7f08a56c7ec26cfc02ece7a77e WatchSource:0}: Error finding container 9ed62695d056b9bc47960f88eb1e37b6fd5a9e7f08a56c7ec26cfc02ece7a77e: Status 404 returned error can't find the container with id 9ed62695d056b9bc47960f88eb1e37b6fd5a9e7f08a56c7ec26cfc02ece7a77e Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.232542 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.232605 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " 
pod="metallb-system/speaker-tqgjs" Jan 23 11:06:51 crc kubenswrapper[4689]: E0123 11:06:51.232852 4689 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 23 11:06:51 crc kubenswrapper[4689]: E0123 11:06:51.232951 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist podName:00b72a13-b4c0-43b7-97b9-1e9a1ec55edf nodeName:}" failed. No retries permitted until 2026-01-23 11:06:52.232925115 +0000 UTC m=+1076.857604994 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist") pod "speaker-tqgjs" (UID: "00b72a13-b4c0-43b7-97b9-1e9a1ec55edf") : secret "metallb-memberlist" not found Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.240488 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ab08845-476b-4601-9385-bbec37b18e35-metrics-certs\") pod \"controller-6968d8fdc4-mtbb4\" (UID: \"4ab08845-476b-4601-9385-bbec37b18e35\") " pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.389458 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.866395 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" event={"ID":"3e9ac503-1ccb-4008-866b-0e6e5a11227d","Type":"ContainerStarted","Data":"9ed62695d056b9bc47960f88eb1e37b6fd5a9e7f08a56c7ec26cfc02ece7a77e"} Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.867831 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"a20e8c251a7ec4142e323fd43b70b54921eb75014d08ab1b23a4169ed3fec0c0"} Jan 23 11:06:51 crc kubenswrapper[4689]: I0123 11:06:51.928967 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mtbb4"] Jan 23 11:06:51 crc kubenswrapper[4689]: W0123 11:06:51.942418 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ab08845_476b_4601_9385_bbec37b18e35.slice/crio-f19981265826468e0c259d2d5b9ee6b463ff0da2b3b479c3985f89d61ef08f01 WatchSource:0}: Error finding container f19981265826468e0c259d2d5b9ee6b463ff0da2b3b479c3985f89d61ef08f01: Status 404 returned error can't find the container with id f19981265826468e0c259d2d5b9ee6b463ff0da2b3b479c3985f89d61ef08f01 Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.250821 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.260125 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/00b72a13-b4c0-43b7-97b9-1e9a1ec55edf-memberlist\") pod \"speaker-tqgjs\" (UID: \"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf\") " pod="metallb-system/speaker-tqgjs" Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.274047 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-tqgjs" Jan 23 11:06:52 crc kubenswrapper[4689]: W0123 11:06:52.305167 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b72a13_b4c0_43b7_97b9_1e9a1ec55edf.slice/crio-022931181e2b381068309e283739f66fa341824363c25bb6a76155ddd3bea913 WatchSource:0}: Error finding container 022931181e2b381068309e283739f66fa341824363c25bb6a76155ddd3bea913: Status 404 returned error can't find the container with id 022931181e2b381068309e283739f66fa341824363c25bb6a76155ddd3bea913 Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.876392 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tqgjs" event={"ID":"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf","Type":"ContainerStarted","Data":"022931181e2b381068309e283739f66fa341824363c25bb6a76155ddd3bea913"} Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.878075 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtbb4" event={"ID":"4ab08845-476b-4601-9385-bbec37b18e35","Type":"ContainerStarted","Data":"d798a3fe62ced9df7774a5ee35bb1d5ad3565afb9ae41974b4249349eb61c35d"} Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.878104 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtbb4" event={"ID":"4ab08845-476b-4601-9385-bbec37b18e35","Type":"ContainerStarted","Data":"f7c8f26242835071c3b6ce1bf32e2ae2945dcb1b1f36757408a9a2e51abf8dfc"} Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.878121 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtbb4" event={"ID":"4ab08845-476b-4601-9385-bbec37b18e35","Type":"ContainerStarted","Data":"f19981265826468e0c259d2d5b9ee6b463ff0da2b3b479c3985f89d61ef08f01"} Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.878218 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:06:52 crc kubenswrapper[4689]: I0123 11:06:52.914399 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-mtbb4" podStartSLOduration=2.914376502 podStartE2EDuration="2.914376502s" podCreationTimestamp="2026-01-23 11:06:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:06:52.902639747 +0000 UTC m=+1077.527319606" watchObservedRunningTime="2026-01-23 11:06:52.914376502 +0000 UTC m=+1077.539056361" Jan 23 11:06:53 crc kubenswrapper[4689]: I0123 11:06:53.900297 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tqgjs" event={"ID":"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf","Type":"ContainerStarted","Data":"f6e7cb295635c37786202069e1e22490fee04e4ae8622761ad1ce69cf5ad6799"} Jan 23 11:06:54 crc kubenswrapper[4689]: I0123 11:06:54.912656 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tqgjs" event={"ID":"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf","Type":"ContainerStarted","Data":"4d164ee0697fd66be0102a489b6f3342c6d3ca692a7d8e73a2f89e9b176d1efb"} Jan 23 11:06:54 crc kubenswrapper[4689]: I0123 11:06:54.912789 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tqgjs" Jan 23 11:06:54 crc kubenswrapper[4689]: I0123 11:06:54.947691 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/speaker-tqgjs" podStartSLOduration=4.947666129 podStartE2EDuration="4.947666129s" podCreationTimestamp="2026-01-23 11:06:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:06:54.941508734 +0000 UTC m=+1079.566188603" watchObservedRunningTime="2026-01-23 11:06:54.947666129 +0000 UTC m=+1079.572345988" Jan 23 11:06:58 crc kubenswrapper[4689]: I0123 11:06:58.957234 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" event={"ID":"3e9ac503-1ccb-4008-866b-0e6e5a11227d","Type":"ContainerStarted","Data":"3bf3d98f8e059ffe4c404e87f11c0199343c0bf81c244e9eb18ce59ec96e9182"} Jan 23 11:06:58 crc kubenswrapper[4689]: I0123 11:06:58.957877 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:06:58 crc kubenswrapper[4689]: I0123 11:06:58.960307 4689 generic.go:334] "Generic (PLEG): container finished" podID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerID="52900c1aa9e2f71b4d6ae1eea62f5889ed78dc75cfa0aab050fdb69119539499" exitCode=0 Jan 23 11:06:58 crc kubenswrapper[4689]: I0123 11:06:58.960354 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerDied","Data":"52900c1aa9e2f71b4d6ae1eea62f5889ed78dc75cfa0aab050fdb69119539499"} Jan 23 11:06:58 crc kubenswrapper[4689]: I0123 11:06:58.979832 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podStartSLOduration=1.609878401 podStartE2EDuration="8.979815856s" podCreationTimestamp="2026-01-23 11:06:50 +0000 UTC" firstStartedPulling="2026-01-23 11:06:51.186058158 +0000 UTC m=+1075.810738057" lastFinishedPulling="2026-01-23 11:06:58.555995653 +0000 UTC m=+1083.180675512" observedRunningTime="2026-01-23 11:06:58.977536842 +0000 UTC m=+1083.602216731" watchObservedRunningTime="2026-01-23 11:06:58.979815856 +0000 UTC m=+1083.604495715" Jan 23 11:06:59 crc kubenswrapper[4689]: I0123 11:06:59.970614 4689 generic.go:334] "Generic (PLEG): container finished" podID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerID="487e82477fa376d9d3f7d03ce468887ea294b814ddb956c04aeedb8b3db01be3" exitCode=0 Jan 23 11:06:59 crc kubenswrapper[4689]: I0123 11:06:59.970671 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerDied","Data":"487e82477fa376d9d3f7d03ce468887ea294b814ddb956c04aeedb8b3db01be3"} Jan 23 11:07:00 crc kubenswrapper[4689]: I0123 11:07:00.991661 4689 generic.go:334] "Generic (PLEG): container finished" podID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerID="58ca04b091f926be2c901429eedd6d8995972314582edade5256492c27b334ea" exitCode=0 Jan 23 11:07:00 crc kubenswrapper[4689]: I0123 11:07:00.991928 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerDied","Data":"58ca04b091f926be2c901429eedd6d8995972314582edade5256492c27b334ea"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.002347 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" 
event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"75e1f3f49cbfedda7953dcccd41cc7a1d2721a743ec320ad0cb8e3c5b8828f04"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.002670 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"3ad289184c423703856fd84a19d9e1ebfbbe1a30997a2357d6535aef2e4b2c02"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.002680 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"70bea961106d18a319a0e0945dd19824b1ee33b1e2b8c2047a446caa765ea03b"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.002688 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"0bbf2301f466f95f4024c28175d4df0efc1cfdd968533398bbb13b728a6de448"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.002698 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"c69e86b619f0f860ad22540bd294c686b0bcaff89701ee99aa0ff389609113bd"} Jan 23 11:07:02 crc kubenswrapper[4689]: I0123 11:07:02.277876 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tqgjs" Jan 23 11:07:03 crc kubenswrapper[4689]: I0123 11:07:03.015795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"ea1bd787f4b3bbd11224b84b6055ca32c87acf596c618b0ec87e3d7aaf417919"} Jan 23 11:07:03 crc kubenswrapper[4689]: I0123 11:07:03.016116 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:07:03 crc kubenswrapper[4689]: I0123 11:07:03.049238 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-pkdqh" podStartSLOduration=5.40349341 podStartE2EDuration="13.049216774s" podCreationTimestamp="2026-01-23 11:06:50 +0000 UTC" firstStartedPulling="2026-01-23 11:06:50.927507381 +0000 UTC m=+1075.552187260" lastFinishedPulling="2026-01-23 11:06:58.573230765 +0000 UTC m=+1083.197910624" observedRunningTime="2026-01-23 11:07:03.043415598 +0000 UTC m=+1087.668095467" watchObservedRunningTime="2026-01-23 11:07:03.049216774 +0000 UTC m=+1087.673896633" Jan 23 11:07:03 crc kubenswrapper[4689]: I0123 11:07:03.311016 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:07:03 crc kubenswrapper[4689]: I0123 11:07:03.311334 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.210006 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-d9bfx"] 
Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.213173 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.215846 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.216010 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.216240 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-f8kf8" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.265211 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d9bfx"] Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.325742 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pghbr\" (UniqueName: \"kubernetes.io/projected/d92e2c5f-df9d-44e5-839c-806799a650a4-kube-api-access-pghbr\") pod \"openstack-operator-index-d9bfx\" (UID: \"d92e2c5f-df9d-44e5-839c-806799a650a4\") " pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.427444 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pghbr\" (UniqueName: \"kubernetes.io/projected/d92e2c5f-df9d-44e5-839c-806799a650a4-kube-api-access-pghbr\") pod \"openstack-operator-index-d9bfx\" (UID: \"d92e2c5f-df9d-44e5-839c-806799a650a4\") " pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.448458 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pghbr\" (UniqueName: \"kubernetes.io/projected/d92e2c5f-df9d-44e5-839c-806799a650a4-kube-api-access-pghbr\") pod \"openstack-operator-index-d9bfx\" (UID: \"d92e2c5f-df9d-44e5-839c-806799a650a4\") " pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.546292 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.720349 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:07:05 crc kubenswrapper[4689]: I0123 11:07:05.762227 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:07:08 crc kubenswrapper[4689]: I0123 11:07:08.932864 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d9bfx"] Jan 23 11:07:08 crc kubenswrapper[4689]: W0123 11:07:08.940362 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd92e2c5f_df9d_44e5_839c_806799a650a4.slice/crio-80e3ee71aea46413205814f7c564533960fbaeae815efc780f2acf36d5daf21b WatchSource:0}: Error finding container 80e3ee71aea46413205814f7c564533960fbaeae815efc780f2acf36d5daf21b: Status 404 returned error can't find the container with id 80e3ee71aea46413205814f7c564533960fbaeae815efc780f2acf36d5daf21b Jan 23 11:07:09 crc kubenswrapper[4689]: I0123 11:07:09.063275 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d9bfx" event={"ID":"d92e2c5f-df9d-44e5-839c-806799a650a4","Type":"ContainerStarted","Data":"80e3ee71aea46413205814f7c564533960fbaeae815efc780f2acf36d5daf21b"} Jan 23 11:07:10 crc kubenswrapper[4689]: I0123 11:07:10.734965 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 11:07:11 crc kubenswrapper[4689]: I0123 11:07:11.397209 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 11:07:12 crc kubenswrapper[4689]: I0123 11:07:12.094108 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d9bfx" event={"ID":"d92e2c5f-df9d-44e5-839c-806799a650a4","Type":"ContainerStarted","Data":"4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9"} Jan 23 11:07:12 crc kubenswrapper[4689]: I0123 11:07:12.115833 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-d9bfx" podStartSLOduration=5.03868453 podStartE2EDuration="7.115810851s" podCreationTimestamp="2026-01-23 11:07:05 +0000 UTC" firstStartedPulling="2026-01-23 11:07:08.942099632 +0000 UTC m=+1093.566779491" lastFinishedPulling="2026-01-23 11:07:11.019225953 +0000 UTC m=+1095.643905812" observedRunningTime="2026-01-23 11:07:12.11537348 +0000 UTC m=+1096.740053379" watchObservedRunningTime="2026-01-23 11:07:12.115810851 +0000 UTC m=+1096.740490700" Jan 23 11:07:15 crc kubenswrapper[4689]: I0123 11:07:15.547384 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:15 crc kubenswrapper[4689]: I0123 11:07:15.548422 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:15 crc kubenswrapper[4689]: I0123 11:07:15.586194 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:16 crc kubenswrapper[4689]: I0123 11:07:16.174052 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 11:07:20 crc kubenswrapper[4689]: I0123 11:07:20.726003 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pkdqh" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.443036 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x"] Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.445361 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.448903 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-cnnx7" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.458936 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x"] Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.568411 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.568664 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.569085 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cw8v\" (UniqueName: \"kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.670542 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cw8v\" (UniqueName: \"kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.670605 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.670652 4689 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.671135 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.671336 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.698228 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cw8v\" (UniqueName: \"kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v\") pod \"2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:22 crc kubenswrapper[4689]: I0123 11:07:22.788199 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:23 crc kubenswrapper[4689]: I0123 11:07:23.216866 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x"] Jan 23 11:07:23 crc kubenswrapper[4689]: W0123 11:07:23.217054 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2496f05e_2cab_45ec_8c73_1820e5c268f6.slice/crio-a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7 WatchSource:0}: Error finding container a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7: Status 404 returned error can't find the container with id a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7 Jan 23 11:07:24 crc kubenswrapper[4689]: I0123 11:07:24.224193 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerStarted","Data":"a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7"} Jan 23 11:07:29 crc kubenswrapper[4689]: I0123 11:07:29.276745 4689 generic.go:334] "Generic (PLEG): container finished" podID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerID="4cafb2ee817514ca73be34a17db5dd7f1b89c3cc8570c61dd1bcfcdf372cd300" exitCode=0 Jan 23 11:07:29 crc kubenswrapper[4689]: I0123 11:07:29.276855 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerDied","Data":"4cafb2ee817514ca73be34a17db5dd7f1b89c3cc8570c61dd1bcfcdf372cd300"} Jan 23 11:07:31 crc kubenswrapper[4689]: I0123 11:07:31.296969 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerStarted","Data":"db30a8830330570dea2f659c20d25963045f1e6c2be9cfef03a3bbc27432140f"} Jan 23 11:07:32 crc kubenswrapper[4689]: I0123 11:07:32.305534 4689 generic.go:334] "Generic (PLEG): container finished" podID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerID="db30a8830330570dea2f659c20d25963045f1e6c2be9cfef03a3bbc27432140f" exitCode=0 Jan 23 11:07:32 crc kubenswrapper[4689]: I0123 11:07:32.305631 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerDied","Data":"db30a8830330570dea2f659c20d25963045f1e6c2be9cfef03a3bbc27432140f"} Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.312072 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.312141 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.312206 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.317665 4689 generic.go:334] "Generic (PLEG): container finished" podID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerID="d7e79ec716143760952d654b884d3af349af076c8795ea1e68763f813db86a65" exitCode=0 Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.317731 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerDied","Data":"d7e79ec716143760952d654b884d3af349af076c8795ea1e68763f813db86a65"} Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.318659 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:07:33 crc kubenswrapper[4689]: I0123 11:07:33.318747 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81" gracePeriod=600 Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.327514 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81" exitCode=0 Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.327584 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81"} Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.328254 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041"} Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.328316 4689 scope.go:117] "RemoveContainer" containerID="c7ede398ad329e3d1da033621f66a70a5e2c9501cd8a9e0138db79e18cff983d" Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.644030 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.779085 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle\") pod \"2496f05e-2cab-45ec-8c73-1820e5c268f6\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.779220 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cw8v\" (UniqueName: \"kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v\") pod \"2496f05e-2cab-45ec-8c73-1820e5c268f6\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.779361 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util\") pod \"2496f05e-2cab-45ec-8c73-1820e5c268f6\" (UID: \"2496f05e-2cab-45ec-8c73-1820e5c268f6\") " Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.780580 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle" (OuterVolumeSpecName: "bundle") pod "2496f05e-2cab-45ec-8c73-1820e5c268f6" (UID: "2496f05e-2cab-45ec-8c73-1820e5c268f6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.788831 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v" (OuterVolumeSpecName: "kube-api-access-6cw8v") pod "2496f05e-2cab-45ec-8c73-1820e5c268f6" (UID: "2496f05e-2cab-45ec-8c73-1820e5c268f6"). InnerVolumeSpecName "kube-api-access-6cw8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.881706 4689 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:07:34 crc kubenswrapper[4689]: I0123 11:07:34.881741 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cw8v\" (UniqueName: \"kubernetes.io/projected/2496f05e-2cab-45ec-8c73-1820e5c268f6-kube-api-access-6cw8v\") on node \"crc\" DevicePath \"\"" Jan 23 11:07:35 crc kubenswrapper[4689]: I0123 11:07:35.172660 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util" (OuterVolumeSpecName: "util") pod "2496f05e-2cab-45ec-8c73-1820e5c268f6" (UID: "2496f05e-2cab-45ec-8c73-1820e5c268f6"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:07:35 crc kubenswrapper[4689]: I0123 11:07:35.187608 4689 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2496f05e-2cab-45ec-8c73-1820e5c268f6-util\") on node \"crc\" DevicePath \"\"" Jan 23 11:07:35 crc kubenswrapper[4689]: I0123 11:07:35.339635 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" Jan 23 11:07:35 crc kubenswrapper[4689]: I0123 11:07:35.339684 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x" event={"ID":"2496f05e-2cab-45ec-8c73-1820e5c268f6","Type":"ContainerDied","Data":"a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7"} Jan 23 11:07:35 crc kubenswrapper[4689]: I0123 11:07:35.339756 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0d6b61028349e72c832f7aefa786a6ceb7e52378e8b14d697196901790390c7" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.841227 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8"] Jan 23 11:07:38 crc kubenswrapper[4689]: E0123 11:07:38.842135 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="util" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.842171 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="util" Jan 23 11:07:38 crc kubenswrapper[4689]: E0123 11:07:38.842211 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="pull" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.842220 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="pull" Jan 23 11:07:38 crc kubenswrapper[4689]: E0123 11:07:38.842247 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="extract" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.842255 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="extract" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.842437 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2496f05e-2cab-45ec-8c73-1820e5c268f6" containerName="extract" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.843138 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.848245 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-hxqqt" Jan 23 11:07:38 crc kubenswrapper[4689]: I0123 11:07:38.881338 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8"] Jan 23 11:07:39 crc kubenswrapper[4689]: I0123 11:07:39.009065 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swqct\" (UniqueName: \"kubernetes.io/projected/53597531-35c9-4478-95cc-690c554f04d0-kube-api-access-swqct\") pod \"openstack-operator-controller-init-7dc68b46f7-8szn8\" (UID: \"53597531-35c9-4478-95cc-690c554f04d0\") " pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:39 crc kubenswrapper[4689]: I0123 11:07:39.110065 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swqct\" (UniqueName: \"kubernetes.io/projected/53597531-35c9-4478-95cc-690c554f04d0-kube-api-access-swqct\") pod \"openstack-operator-controller-init-7dc68b46f7-8szn8\" (UID: \"53597531-35c9-4478-95cc-690c554f04d0\") " pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:39 crc kubenswrapper[4689]: I0123 11:07:39.131675 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swqct\" (UniqueName: \"kubernetes.io/projected/53597531-35c9-4478-95cc-690c554f04d0-kube-api-access-swqct\") pod \"openstack-operator-controller-init-7dc68b46f7-8szn8\" (UID: \"53597531-35c9-4478-95cc-690c554f04d0\") " pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:39 crc kubenswrapper[4689]: I0123 11:07:39.162081 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:39 crc kubenswrapper[4689]: W0123 11:07:39.657257 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53597531_35c9_4478_95cc_690c554f04d0.slice/crio-876ec92dfc9cd8f674366eaf2abf2bef1472589db971ce336fa460518a1aa7ed WatchSource:0}: Error finding container 876ec92dfc9cd8f674366eaf2abf2bef1472589db971ce336fa460518a1aa7ed: Status 404 returned error can't find the container with id 876ec92dfc9cd8f674366eaf2abf2bef1472589db971ce336fa460518a1aa7ed Jan 23 11:07:39 crc kubenswrapper[4689]: I0123 11:07:39.667072 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8"] Jan 23 11:07:40 crc kubenswrapper[4689]: I0123 11:07:40.389998 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" event={"ID":"53597531-35c9-4478-95cc-690c554f04d0","Type":"ContainerStarted","Data":"876ec92dfc9cd8f674366eaf2abf2bef1472589db971ce336fa460518a1aa7ed"} Jan 23 11:07:44 crc kubenswrapper[4689]: I0123 11:07:44.432379 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" event={"ID":"53597531-35c9-4478-95cc-690c554f04d0","Type":"ContainerStarted","Data":"ef07d0ae36d941c862210e8f123a8a2796d41e0ae432c3c9b206097fa1e22fbd"} Jan 23 11:07:44 crc kubenswrapper[4689]: I0123 11:07:44.432970 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:07:44 crc kubenswrapper[4689]: I0123 11:07:44.467204 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podStartSLOduration=2.224368409 podStartE2EDuration="6.467180584s" podCreationTimestamp="2026-01-23 11:07:38 +0000 UTC" firstStartedPulling="2026-01-23 11:07:39.65949858 +0000 UTC m=+1124.284178449" lastFinishedPulling="2026-01-23 11:07:43.902310765 +0000 UTC m=+1128.526990624" observedRunningTime="2026-01-23 11:07:44.462904757 +0000 UTC m=+1129.087584636" watchObservedRunningTime="2026-01-23 11:07:44.467180584 +0000 UTC m=+1129.091860443" Jan 23 11:07:49 crc kubenswrapper[4689]: I0123 11:07:49.165554 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.621465 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.622788 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.625000 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gnbfz" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.628873 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.629868 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.631489 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-wzlcj" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.638312 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.652191 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.657698 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.658876 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.662603 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-bhwt4" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.683757 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.689882 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.700236 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.706532 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qjt4w" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.707389 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.708284 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.715448 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-s6596" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.731141 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.749560 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.754093 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2dlz\" (UniqueName: \"kubernetes.io/projected/c9dc7063-1b29-40e1-b451-e9dc882e7476-kube-api-access-j2dlz\") pod \"barbican-operator-controller-manager-59dd8b7cbf-86c6v\" (UID: \"c9dc7063-1b29-40e1-b451-e9dc882e7476\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.754215 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdjqp\" (UniqueName: \"kubernetes.io/projected/d587cb55-dfd2-42e6-bb32-3a4202dd05c5-kube-api-access-hdjqp\") pod \"cinder-operator-controller-manager-69cf5d4557-q2clt\" (UID: \"d587cb55-dfd2-42e6-bb32-3a4202dd05c5\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.779614 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.780587 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.787282 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jrkgp" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.804440 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.828264 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.829254 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.831680 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-nnlnp" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.843380 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.844341 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.856496 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-4qwhw" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.857045 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.857227 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858259 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858758 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdjqp\" (UniqueName: \"kubernetes.io/projected/d587cb55-dfd2-42e6-bb32-3a4202dd05c5-kube-api-access-hdjqp\") pod \"cinder-operator-controller-manager-69cf5d4557-q2clt\" (UID: \"d587cb55-dfd2-42e6-bb32-3a4202dd05c5\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858828 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx9r9\" (UniqueName: \"kubernetes.io/projected/54403d19-67da-4783-8b45-b7070bc15424-kube-api-access-nx9r9\") pod \"designate-operator-controller-manager-b45d7bf98-22rdn\" (UID: \"54403d19-67da-4783-8b45-b7070bc15424\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858854 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r72gx\" (UniqueName: \"kubernetes.io/projected/39db2be1-cb37-4ca9-af8a-5ce0f2d1db16-kube-api-access-r72gx\") pod \"glance-operator-controller-manager-78fdd796fd-dl6g9\" (UID: \"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858908 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2dlz\" (UniqueName: \"kubernetes.io/projected/c9dc7063-1b29-40e1-b451-e9dc882e7476-kube-api-access-j2dlz\") pod \"barbican-operator-controller-manager-59dd8b7cbf-86c6v\" (UID: \"c9dc7063-1b29-40e1-b451-e9dc882e7476\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.858934 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp6hf\" (UniqueName: \"kubernetes.io/projected/1f6a7f15-609b-414e-8119-366afe98811f-kube-api-access-tp6hf\") pod \"heat-operator-controller-manager-594c8c9d5d-s69pd\" (UID: \"1f6a7f15-609b-414e-8119-366afe98811f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.867722 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-x9pk2" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.869004 4689 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.920224 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677"] Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.937181 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdjqp\" (UniqueName: \"kubernetes.io/projected/d587cb55-dfd2-42e6-bb32-3a4202dd05c5-kube-api-access-hdjqp\") pod \"cinder-operator-controller-manager-69cf5d4557-q2clt\" (UID: \"d587cb55-dfd2-42e6-bb32-3a4202dd05c5\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.963577 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965580 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp6hf\" (UniqueName: \"kubernetes.io/projected/1f6a7f15-609b-414e-8119-366afe98811f-kube-api-access-tp6hf\") pod \"heat-operator-controller-manager-594c8c9d5d-s69pd\" (UID: \"1f6a7f15-609b-414e-8119-366afe98811f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965662 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n5hv\" (UniqueName: \"kubernetes.io/projected/3d3561eb-7369-4466-b9ee-037e02b2c219-kube-api-access-5n5hv\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wh677\" (UID: \"3d3561eb-7369-4466-b9ee-037e02b2c219\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965712 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwmhs\" (UniqueName: \"kubernetes.io/projected/d55b5d87-6f4b-4eb7-bfc7-025b936cebb9-kube-api-access-jwmhs\") pod \"keystone-operator-controller-manager-b8b6d4659-b8s9h\" (UID: \"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965753 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw7t5\" (UniqueName: \"kubernetes.io/projected/8359ad74-2a40-4f5f-afe6-880a3f0a990e-kube-api-access-fw7t5\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965781 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx9r9\" (UniqueName: \"kubernetes.io/projected/54403d19-67da-4783-8b45-b7070bc15424-kube-api-access-nx9r9\") pod \"designate-operator-controller-manager-b45d7bf98-22rdn\" (UID: \"54403d19-67da-4783-8b45-b7070bc15424\") " 
pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965811 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmtwc\" (UniqueName: \"kubernetes.io/projected/b5e62e31-60a7-4964-b3e7-611e7a8bfa81-kube-api-access-wmtwc\") pod \"horizon-operator-controller-manager-77d5c5b54f-cgxb7\" (UID: \"b5e62e31-60a7-4964-b3e7-611e7a8bfa81\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.965837 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r72gx\" (UniqueName: \"kubernetes.io/projected/39db2be1-cb37-4ca9-af8a-5ce0f2d1db16-kube-api-access-r72gx\") pod \"glance-operator-controller-manager-78fdd796fd-dl6g9\" (UID: \"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:08:27 crc kubenswrapper[4689]: I0123 11:08:27.978367 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2dlz\" (UniqueName: \"kubernetes.io/projected/c9dc7063-1b29-40e1-b451-e9dc882e7476-kube-api-access-j2dlz\") pod \"barbican-operator-controller-manager-59dd8b7cbf-86c6v\" (UID: \"c9dc7063-1b29-40e1-b451-e9dc882e7476\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.005430 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.017251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp6hf\" (UniqueName: \"kubernetes.io/projected/1f6a7f15-609b-414e-8119-366afe98811f-kube-api-access-tp6hf\") pod \"heat-operator-controller-manager-594c8c9d5d-s69pd\" (UID: \"1f6a7f15-609b-414e-8119-366afe98811f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.018222 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx9r9\" (UniqueName: \"kubernetes.io/projected/54403d19-67da-4783-8b45-b7070bc15424-kube-api-access-nx9r9\") pod \"designate-operator-controller-manager-b45d7bf98-22rdn\" (UID: \"54403d19-67da-4783-8b45-b7070bc15424\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.029246 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r72gx\" (UniqueName: \"kubernetes.io/projected/39db2be1-cb37-4ca9-af8a-5ce0f2d1db16-kube-api-access-r72gx\") pod \"glance-operator-controller-manager-78fdd796fd-dl6g9\" (UID: \"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.041286 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.043102 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.054765 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.103226 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-2cqkb" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106443 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwmhs\" (UniqueName: \"kubernetes.io/projected/d55b5d87-6f4b-4eb7-bfc7-025b936cebb9-kube-api-access-jwmhs\") pod \"keystone-operator-controller-manager-b8b6d4659-b8s9h\" (UID: \"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106495 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vltff\" (UniqueName: \"kubernetes.io/projected/28a286e0-4072-40b0-aa95-4a12299f5a72-kube-api-access-vltff\") pod \"manila-operator-controller-manager-78c6999f6f-szbq7\" (UID: \"28a286e0-4072-40b0-aa95-4a12299f5a72\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106537 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw7t5\" (UniqueName: \"kubernetes.io/projected/8359ad74-2a40-4f5f-afe6-880a3f0a990e-kube-api-access-fw7t5\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106565 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmtwc\" (UniqueName: \"kubernetes.io/projected/b5e62e31-60a7-4964-b3e7-611e7a8bfa81-kube-api-access-wmtwc\") pod \"horizon-operator-controller-manager-77d5c5b54f-cgxb7\" (UID: \"b5e62e31-60a7-4964-b3e7-611e7a8bfa81\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106620 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.106732 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n5hv\" (UniqueName: \"kubernetes.io/projected/3d3561eb-7369-4466-b9ee-037e02b2c219-kube-api-access-5n5hv\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wh677\" (UID: \"3d3561eb-7369-4466-b9ee-037e02b2c219\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.107086 4689 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.107130 4689 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert podName:8359ad74-2a40-4f5f-afe6-880a3f0a990e nodeName:}" failed. No retries permitted until 2026-01-23 11:08:28.607115653 +0000 UTC m=+1173.231795512 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert") pod "infra-operator-controller-manager-54ccf4f85d-l5n7d" (UID: "8359ad74-2a40-4f5f-afe6-880a3f0a990e") : secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.122219 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.133850 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n5hv\" (UniqueName: \"kubernetes.io/projected/3d3561eb-7369-4466-b9ee-037e02b2c219-kube-api-access-5n5hv\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wh677\" (UID: \"3d3561eb-7369-4466-b9ee-037e02b2c219\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.146689 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwmhs\" (UniqueName: \"kubernetes.io/projected/d55b5d87-6f4b-4eb7-bfc7-025b936cebb9-kube-api-access-jwmhs\") pod \"keystone-operator-controller-manager-b8b6d4659-b8s9h\" (UID: \"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.159134 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmtwc\" (UniqueName: \"kubernetes.io/projected/b5e62e31-60a7-4964-b3e7-611e7a8bfa81-kube-api-access-wmtwc\") pod \"horizon-operator-controller-manager-77d5c5b54f-cgxb7\" (UID: \"b5e62e31-60a7-4964-b3e7-611e7a8bfa81\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.159685 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.162414 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw7t5\" (UniqueName: \"kubernetes.io/projected/8359ad74-2a40-4f5f-afe6-880a3f0a990e-kube-api-access-fw7t5\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.185298 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.187812 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.190735 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-t6c7c" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.207153 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.210235 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vltff\" (UniqueName: \"kubernetes.io/projected/28a286e0-4072-40b0-aa95-4a12299f5a72-kube-api-access-vltff\") pod \"manila-operator-controller-manager-78c6999f6f-szbq7\" (UID: \"28a286e0-4072-40b0-aa95-4a12299f5a72\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.226214 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.227520 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.231614 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vltff\" (UniqueName: \"kubernetes.io/projected/28a286e0-4072-40b0-aa95-4a12299f5a72-kube-api-access-vltff\") pod \"manila-operator-controller-manager-78c6999f6f-szbq7\" (UID: \"28a286e0-4072-40b0-aa95-4a12299f5a72\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.231835 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-5fz99" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.246033 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.274271 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.275393 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.282302 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-7xvvp" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.284316 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.302094 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.302533 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.305454 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.312678 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr2v7\" (UniqueName: \"kubernetes.io/projected/5f4d15d8-f941-4082-ab51-3ecda5527f9b-kube-api-access-rr2v7\") pod \"mariadb-operator-controller-manager-c87fff755-m8m6r\" (UID: \"5f4d15d8-f941-4082-ab51-3ecda5527f9b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.325091 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.327542 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.346373 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.348041 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.352659 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-8w4gf" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.359467 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.400555 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.412477 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.413774 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.415954 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc8dg\" (UniqueName: \"kubernetes.io/projected/2bd7a193-5394-452e-9315-0332e4a4e667-kube-api-access-nc8dg\") pod \"nova-operator-controller-manager-6b8bc8d87d-2lwwn\" (UID: \"2bd7a193-5394-452e-9315-0332e4a4e667\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.416057 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcv79\" (UniqueName: \"kubernetes.io/projected/a9f05c03-72c2-4906-b327-df50d5922d28-kube-api-access-mcv79\") pod \"neutron-operator-controller-manager-5d8f59fb49-rt7xn\" (UID: \"a9f05c03-72c2-4906-b327-df50d5922d28\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.416115 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr2v7\" (UniqueName: \"kubernetes.io/projected/5f4d15d8-f941-4082-ab51-3ecda5527f9b-kube-api-access-rr2v7\") pod \"mariadb-operator-controller-manager-c87fff755-m8m6r\" (UID: \"5f4d15d8-f941-4082-ab51-3ecda5527f9b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.423519 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-97tq9" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.441905 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.457876 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr2v7\" (UniqueName: \"kubernetes.io/projected/5f4d15d8-f941-4082-ab51-3ecda5527f9b-kube-api-access-rr2v7\") pod \"mariadb-operator-controller-manager-c87fff755-m8m6r\" (UID: \"5f4d15d8-f941-4082-ab51-3ecda5527f9b\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.498276 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.499318 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.503365 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-clt4f" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.503549 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.520108 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq2rk\" (UniqueName: \"kubernetes.io/projected/167d35d1-8eb3-492e-beb3-4325d183c7b9-kube-api-access-zq2rk\") pod \"octavia-operator-controller-manager-7bd9774b6-m2sb7\" (UID: \"167d35d1-8eb3-492e-beb3-4325d183c7b9\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.520191 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc8dg\" (UniqueName: \"kubernetes.io/projected/2bd7a193-5394-452e-9315-0332e4a4e667-kube-api-access-nc8dg\") pod \"nova-operator-controller-manager-6b8bc8d87d-2lwwn\" (UID: \"2bd7a193-5394-452e-9315-0332e4a4e667\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.520316 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbqls\" (UniqueName: \"kubernetes.io/projected/d36ac685-507d-4cfa-b6fe-7f595536c32f-kube-api-access-xbqls\") pod \"ovn-operator-controller-manager-55db956ddc-5rkch\" (UID: \"d36ac685-507d-4cfa-b6fe-7f595536c32f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.520354 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcv79\" (UniqueName: \"kubernetes.io/projected/a9f05c03-72c2-4906-b327-df50d5922d28-kube-api-access-mcv79\") pod \"neutron-operator-controller-manager-5d8f59fb49-rt7xn\" (UID: \"a9f05c03-72c2-4906-b327-df50d5922d28\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.526867 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.530953 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.550522 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.551598 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.557211 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-cjhgs" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.557337 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc8dg\" (UniqueName: \"kubernetes.io/projected/2bd7a193-5394-452e-9315-0332e4a4e667-kube-api-access-nc8dg\") pod \"nova-operator-controller-manager-6b8bc8d87d-2lwwn\" (UID: \"2bd7a193-5394-452e-9315-0332e4a4e667\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.563767 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcv79\" (UniqueName: \"kubernetes.io/projected/a9f05c03-72c2-4906-b327-df50d5922d28-kube-api-access-mcv79\") pod \"neutron-operator-controller-manager-5d8f59fb49-rt7xn\" (UID: \"a9f05c03-72c2-4906-b327-df50d5922d28\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.579240 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.580254 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.602167 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-mw52c" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.615204 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.615253 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636341 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636394 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28f2m\" (UniqueName: \"kubernetes.io/projected/3369528a-f39f-4e47-92e9-abbca4395b98-kube-api-access-28f2m\") pod \"placement-operator-controller-manager-5d646b7d76-kklnd\" (UID: \"3369528a-f39f-4e47-92e9-abbca4395b98\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636445 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636473 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq2rk\" (UniqueName: \"kubernetes.io/projected/167d35d1-8eb3-492e-beb3-4325d183c7b9-kube-api-access-zq2rk\") pod \"octavia-operator-controller-manager-7bd9774b6-m2sb7\" (UID: \"167d35d1-8eb3-492e-beb3-4325d183c7b9\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636707 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn2sm\" (UniqueName: \"kubernetes.io/projected/99f43d3e-dce3-4f53-90a5-76793663baaf-kube-api-access-rn2sm\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636937 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjrd7\" (UniqueName: \"kubernetes.io/projected/72fb2e87-da8d-4db1-b255-d38d7c15b5cd-kube-api-access-zjrd7\") pod \"swift-operator-controller-manager-547cbdb99f-t97lk\" (UID: \"72fb2e87-da8d-4db1-b255-d38d7c15b5cd\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.636992 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbqls\" (UniqueName: \"kubernetes.io/projected/d36ac685-507d-4cfa-b6fe-7f595536c32f-kube-api-access-xbqls\") pod \"ovn-operator-controller-manager-55db956ddc-5rkch\" (UID: \"d36ac685-507d-4cfa-b6fe-7f595536c32f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.637300 4689 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret 
"infra-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.637374 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert podName:8359ad74-2a40-4f5f-afe6-880a3f0a990e nodeName:}" failed. No retries permitted until 2026-01-23 11:08:29.637354115 +0000 UTC m=+1174.262033974 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert") pod "infra-operator-controller-manager-54ccf4f85d-l5n7d" (UID: "8359ad74-2a40-4f5f-afe6-880a3f0a990e") : secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.646248 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.673976 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.698855 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.699856 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.708880 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-sczml" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.718114 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq2rk\" (UniqueName: \"kubernetes.io/projected/167d35d1-8eb3-492e-beb3-4325d183c7b9-kube-api-access-zq2rk\") pod \"octavia-operator-controller-manager-7bd9774b6-m2sb7\" (UID: \"167d35d1-8eb3-492e-beb3-4325d183c7b9\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.731265 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.733828 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbqls\" (UniqueName: \"kubernetes.io/projected/d36ac685-507d-4cfa-b6fe-7f595536c32f-kube-api-access-xbqls\") pod \"ovn-operator-controller-manager-55db956ddc-5rkch\" (UID: \"d36ac685-507d-4cfa-b6fe-7f595536c32f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.741068 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn2sm\" (UniqueName: \"kubernetes.io/projected/99f43d3e-dce3-4f53-90a5-76793663baaf-kube-api-access-rn2sm\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.741132 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjrd7\" (UniqueName: 
\"kubernetes.io/projected/72fb2e87-da8d-4db1-b255-d38d7c15b5cd-kube-api-access-zjrd7\") pod \"swift-operator-controller-manager-547cbdb99f-t97lk\" (UID: \"72fb2e87-da8d-4db1-b255-d38d7c15b5cd\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.741224 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.741260 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28f2m\" (UniqueName: \"kubernetes.io/projected/3369528a-f39f-4e47-92e9-abbca4395b98-kube-api-access-28f2m\") pod \"placement-operator-controller-manager-5d646b7d76-kklnd\" (UID: \"3369528a-f39f-4e47-92e9-abbca4395b98\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.742529 4689 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: E0123 11:08:28.742576 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert podName:99f43d3e-dce3-4f53-90a5-76793663baaf nodeName:}" failed. No retries permitted until 2026-01-23 11:08:29.242559139 +0000 UTC m=+1173.867238998 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" (UID: "99f43d3e-dce3-4f53-90a5-76793663baaf") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.742712 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.743849 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.750050 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.751567 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.769014 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-qdth7" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.773635 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.774724 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.778651 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28f2m\" (UniqueName: \"kubernetes.io/projected/3369528a-f39f-4e47-92e9-abbca4395b98-kube-api-access-28f2m\") pod \"placement-operator-controller-manager-5d646b7d76-kklnd\" (UID: \"3369528a-f39f-4e47-92e9-abbca4395b98\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.778912 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjrd7\" (UniqueName: \"kubernetes.io/projected/72fb2e87-da8d-4db1-b255-d38d7c15b5cd-kube-api-access-zjrd7\") pod \"swift-operator-controller-manager-547cbdb99f-t97lk\" (UID: \"72fb2e87-da8d-4db1-b255-d38d7c15b5cd\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.781675 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-mfz9c" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.783843 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn2sm\" (UniqueName: \"kubernetes.io/projected/99f43d3e-dce3-4f53-90a5-76793663baaf-kube-api-access-rn2sm\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.790899 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.842895 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hc4c\" (UniqueName: \"kubernetes.io/projected/6e48e594-66b4-4d88-823f-2ed90fa79d66-kube-api-access-5hc4c\") pod \"telemetry-operator-controller-manager-94c58dc69-75pd4\" (UID: \"6e48e594-66b4-4d88-823f-2ed90fa79d66\") " pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.848956 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.881861 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.884983 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.891475 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.898518 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-d6kdw" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.898936 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.900809 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.927544 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.931878 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.983653 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75"] Jan 23 11:08:28 crc kubenswrapper[4689]: I0123 11:08:28.995970 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.002687 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-5zmmk" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.014440 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9wqp\" (UniqueName: \"kubernetes.io/projected/4922b965-fa40-47b5-b388-e63767b62a97-kube-api-access-v9wqp\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.016881 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hc4c\" (UniqueName: \"kubernetes.io/projected/6e48e594-66b4-4d88-823f-2ed90fa79d66-kube-api-access-5hc4c\") pod \"telemetry-operator-controller-manager-94c58dc69-75pd4\" (UID: \"6e48e594-66b4-4d88-823f-2ed90fa79d66\") " pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.016987 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v57sg\" (UniqueName: \"kubernetes.io/projected/7ad0b754-e721-4b19-b0b6-a7e1200a48d4-kube-api-access-v57sg\") pod \"test-operator-controller-manager-69797bbcbd-rszc5\" (UID: \"7ad0b754-e721-4b19-b0b6-a7e1200a48d4\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.017072 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " 
pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.021536 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.021611 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxmwn\" (UniqueName: \"kubernetes.io/projected/f451d39d-2f3f-4c53-b5a2-d8e7f74247f9-kube-api-access-pxmwn\") pod \"watcher-operator-controller-manager-5ffb9c6597-j79ts\" (UID: \"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.022203 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.027208 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.081556 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hc4c\" (UniqueName: \"kubernetes.io/projected/6e48e594-66b4-4d88-823f-2ed90fa79d66-kube-api-access-5hc4c\") pod \"telemetry-operator-controller-manager-94c58dc69-75pd4\" (UID: \"6e48e594-66b4-4d88-823f-2ed90fa79d66\") " pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.125195 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v57sg\" (UniqueName: \"kubernetes.io/projected/7ad0b754-e721-4b19-b0b6-a7e1200a48d4-kube-api-access-v57sg\") pod \"test-operator-controller-manager-69797bbcbd-rszc5\" (UID: \"7ad0b754-e721-4b19-b0b6-a7e1200a48d4\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.125261 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.125296 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.125319 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxmwn\" (UniqueName: \"kubernetes.io/projected/f451d39d-2f3f-4c53-b5a2-d8e7f74247f9-kube-api-access-pxmwn\") pod 
\"watcher-operator-controller-manager-5ffb9c6597-j79ts\" (UID: \"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.125368 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9wqp\" (UniqueName: \"kubernetes.io/projected/4922b965-fa40-47b5-b388-e63767b62a97-kube-api-access-v9wqp\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.126429 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.126477 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:29.626462569 +0000 UTC m=+1174.251142428 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.126605 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.126634 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:29.626627413 +0000 UTC m=+1174.251307272 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.158072 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.173487 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v57sg\" (UniqueName: \"kubernetes.io/projected/7ad0b754-e721-4b19-b0b6-a7e1200a48d4-kube-api-access-v57sg\") pod \"test-operator-controller-manager-69797bbcbd-rszc5\" (UID: \"7ad0b754-e721-4b19-b0b6-a7e1200a48d4\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.173665 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9wqp\" (UniqueName: \"kubernetes.io/projected/4922b965-fa40-47b5-b388-e63767b62a97-kube-api-access-v9wqp\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.182583 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxmwn\" (UniqueName: \"kubernetes.io/projected/f451d39d-2f3f-4c53-b5a2-d8e7f74247f9-kube-api-access-pxmwn\") pod \"watcher-operator-controller-manager-5ffb9c6597-j79ts\" (UID: \"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.197666 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.227543 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqj6n\" (UniqueName: \"kubernetes.io/projected/b1a77706-f3e5-48b9-95b8-5f13daa0d29f-kube-api-access-sqj6n\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b2s75\" (UID: \"b1a77706-f3e5-48b9-95b8-5f13daa0d29f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.230432 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.328559 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.329916 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqj6n\" (UniqueName: \"kubernetes.io/projected/b1a77706-f3e5-48b9-95b8-5f13daa0d29f-kube-api-access-sqj6n\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b2s75\" (UID: \"b1a77706-f3e5-48b9-95b8-5f13daa0d29f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.329976 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.330227 4689 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.330282 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert podName:99f43d3e-dce3-4f53-90a5-76793663baaf nodeName:}" failed. No retries permitted until 2026-01-23 11:08:30.33026407 +0000 UTC m=+1174.954943929 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" (UID: "99f43d3e-dce3-4f53-90a5-76793663baaf") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.358028 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqj6n\" (UniqueName: \"kubernetes.io/projected/b1a77706-f3e5-48b9-95b8-5f13daa0d29f-kube-api-access-sqj6n\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b2s75\" (UID: \"b1a77706-f3e5-48b9-95b8-5f13daa0d29f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.358346 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.400560 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.421813 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.543743 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.638525 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.638967 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.638917 4689 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.639018 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.639112 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.639126 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert podName:8359ad74-2a40-4f5f-afe6-880a3f0a990e nodeName:}" failed. No retries permitted until 2026-01-23 11:08:31.639091551 +0000 UTC m=+1176.263771480 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert") pod "infra-operator-controller-manager-54ccf4f85d-l5n7d" (UID: "8359ad74-2a40-4f5f-afe6-880a3f0a990e") : secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.639179 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:30.639170643 +0000 UTC m=+1175.263850612 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.639445 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: E0123 11:08:29.639587 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:30.639577643 +0000 UTC m=+1175.264257592 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.715422 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h"] Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.733639 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn"] Jan 23 11:08:29 crc kubenswrapper[4689]: W0123 11:08:29.770237 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54403d19_67da_4783_8b45_b7070bc15424.slice/crio-5f2337f3423784888e81216ca88081f0b0c76d850103aeb6165245a792563bc3 WatchSource:0}: Error finding container 5f2337f3423784888e81216ca88081f0b0c76d850103aeb6165245a792563bc3: Status 404 returned error can't find the container with id 5f2337f3423784888e81216ca88081f0b0c76d850103aeb6165245a792563bc3 Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.847329 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" event={"ID":"d587cb55-dfd2-42e6-bb32-3a4202dd05c5","Type":"ContainerStarted","Data":"8e3342cf971f3a5554043837cb80f206678c426ad472b684e2de44cb35204670"} Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.848956 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" event={"ID":"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9","Type":"ContainerStarted","Data":"d52fe32c3121d2432898a4258f0a19b980dcd4d6bd58283c8b2f45953c6c5e39"} Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.850364 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" event={"ID":"c9dc7063-1b29-40e1-b451-e9dc882e7476","Type":"ContainerStarted","Data":"ccd70b75bb12e7714aa577003d260d11c5ee341275bb2e8841002988997525b1"} Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.851501 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" event={"ID":"54403d19-67da-4783-8b45-b7070bc15424","Type":"ContainerStarted","Data":"5f2337f3423784888e81216ca88081f0b0c76d850103aeb6165245a792563bc3"} Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.855873 4689 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" event={"ID":"1f6a7f15-609b-414e-8119-366afe98811f","Type":"ContainerStarted","Data":"8cb8fcfdfc205ba0dec21fa536e7b234a497246dd8687b98905389c7021d8859"} Jan 23 11:08:29 crc kubenswrapper[4689]: I0123 11:08:29.859320 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" event={"ID":"3d3561eb-7369-4466-b9ee-037e02b2c219","Type":"ContainerStarted","Data":"28b4272c54899e9a2b8dd125697deda3d18b10b312ae20e63fd8b5c4786327d7"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.202254 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r"] Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.224062 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9"] Jan 23 11:08:30 crc kubenswrapper[4689]: W0123 11:08:30.250618 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f4d15d8_f941_4082_ab51_3ecda5527f9b.slice/crio-e3d7251ea67a4b2afcd4b41ba42bd9040dd6a37c47587b3b542910ae607d92d2 WatchSource:0}: Error finding container e3d7251ea67a4b2afcd4b41ba42bd9040dd6a37c47587b3b542910ae607d92d2: Status 404 returned error can't find the container with id e3d7251ea67a4b2afcd4b41ba42bd9040dd6a37c47587b3b542910ae607d92d2 Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.272620 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn"] Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.342655 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7"] Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.357267 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.357459 4689 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.357513 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert podName:99f43d3e-dce3-4f53-90a5-76793663baaf nodeName:}" failed. No retries permitted until 2026-01-23 11:08:32.357499724 +0000 UTC m=+1176.982179583 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" (UID: "99f43d3e-dce3-4f53-90a5-76793663baaf") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.662865 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.663141 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.663290 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.663335 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:32.663322099 +0000 UTC m=+1177.288001958 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.665363 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: E0123 11:08:30.665472 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:32.665447932 +0000 UTC m=+1177.290127871 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.802840 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7"] Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.835847 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch"] Jan 23 11:08:30 crc kubenswrapper[4689]: W0123 11:08:30.857756 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5e62e31_60a7_4964_b3e7_611e7a8bfa81.slice/crio-85e78ee4a8d71f09f62cb5b44a04887ce27289ae47d5db969fe574e431c49ae9 WatchSource:0}: Error finding container 85e78ee4a8d71f09f62cb5b44a04887ce27289ae47d5db969fe574e431c49ae9: Status 404 returned error can't find the container with id 85e78ee4a8d71f09f62cb5b44a04887ce27289ae47d5db969fe574e431c49ae9 Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.896726 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn"] Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.908604 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" event={"ID":"28a286e0-4072-40b0-aa95-4a12299f5a72","Type":"ContainerStarted","Data":"fcabfd0caa4ec029985f6bbf7c2b6800650dcd891670ee55e217fffad14630e6"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.924771 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" event={"ID":"2bd7a193-5394-452e-9315-0332e4a4e667","Type":"ContainerStarted","Data":"b9093c4c87a03e3d824659c8a9afe6f8170d34bbdc57e63976d7a25da0e37d66"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.929375 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" event={"ID":"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16","Type":"ContainerStarted","Data":"aa7e9a92c6120c9ea196b2675a06f274869bb6f9c838094437ebb39900fa2fe7"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.932183 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" event={"ID":"d36ac685-507d-4cfa-b6fe-7f595536c32f","Type":"ContainerStarted","Data":"a910ea2426df83211652aaf76789528cecf09b4dfc91704859b1cdf77801c616"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.933694 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" event={"ID":"a9f05c03-72c2-4906-b327-df50d5922d28","Type":"ContainerStarted","Data":"3f3c52695f8426b5ccaf877c794aeb9c7f2f4fe1f1f5ad7894786a11a5f6ac5b"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 11:08:30.936095 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" event={"ID":"5f4d15d8-f941-4082-ab51-3ecda5527f9b","Type":"ContainerStarted","Data":"e3d7251ea67a4b2afcd4b41ba42bd9040dd6a37c47587b3b542910ae607d92d2"} Jan 23 11:08:30 crc kubenswrapper[4689]: I0123 
11:08:30.997478 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk"] Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.015431 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd"] Jan 23 11:08:31 crc kubenswrapper[4689]: W0123 11:08:31.073721 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3369528a_f39f_4e47_92e9_abbca4395b98.slice/crio-f2362674e7ba9b7755e53b6175bbec070f4563db14c55f08bb8b3e5d6b890b58 WatchSource:0}: Error finding container f2362674e7ba9b7755e53b6175bbec070f4563db14c55f08bb8b3e5d6b890b58: Status 404 returned error can't find the container with id f2362674e7ba9b7755e53b6175bbec070f4563db14c55f08bb8b3e5d6b890b58 Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.459976 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts"] Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.484621 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5"] Jan 23 11:08:31 crc kubenswrapper[4689]: W0123 11:08:31.501285 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf451d39d_2f3f_4c53_b5a2_d8e7f74247f9.slice/crio-d1ad4169041fde14d7d354d147ef49eb0a5982dd3fb307dc1b0729943a1375e0 WatchSource:0}: Error finding container d1ad4169041fde14d7d354d147ef49eb0a5982dd3fb307dc1b0729943a1375e0: Status 404 returned error can't find the container with id d1ad4169041fde14d7d354d147ef49eb0a5982dd3fb307dc1b0729943a1375e0 Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.560161 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7"] Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.581552 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4"] Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.596498 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75"] Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.708038 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:31 crc kubenswrapper[4689]: E0123 11:08:31.708195 4689 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:31 crc kubenswrapper[4689]: E0123 11:08:31.708253 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert podName:8359ad74-2a40-4f5f-afe6-880a3f0a990e nodeName:}" failed. No retries permitted until 2026-01-23 11:08:35.708232705 +0000 UTC m=+1180.332912564 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert") pod "infra-operator-controller-manager-54ccf4f85d-l5n7d" (UID: "8359ad74-2a40-4f5f-afe6-880a3f0a990e") : secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.958123 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" event={"ID":"6e48e594-66b4-4d88-823f-2ed90fa79d66","Type":"ContainerStarted","Data":"28786242c5fd4e51b8794173d5269faf7503e30487c2e07583d1e57df49097d7"} Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.961430 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" event={"ID":"b5e62e31-60a7-4964-b3e7-611e7a8bfa81","Type":"ContainerStarted","Data":"85e78ee4a8d71f09f62cb5b44a04887ce27289ae47d5db969fe574e431c49ae9"} Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.971523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" event={"ID":"7ad0b754-e721-4b19-b0b6-a7e1200a48d4","Type":"ContainerStarted","Data":"f0305b07c4a952b230c2d1982be94f2493b1cf63f108ad588f11a6febb383007"} Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.975020 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" event={"ID":"b1a77706-f3e5-48b9-95b8-5f13daa0d29f","Type":"ContainerStarted","Data":"af3a7edce91257c29b678820a267f3885d9e36615093a8b8191202d7860e935c"} Jan 23 11:08:31 crc kubenswrapper[4689]: I0123 11:08:31.978183 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" event={"ID":"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9","Type":"ContainerStarted","Data":"d1ad4169041fde14d7d354d147ef49eb0a5982dd3fb307dc1b0729943a1375e0"} Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.000112 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" event={"ID":"72fb2e87-da8d-4db1-b255-d38d7c15b5cd","Type":"ContainerStarted","Data":"1cedf898792cbbbbf4fe18b1aed0356b2198dd6d9066f1a1451fd7a6e468e46d"} Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.002195 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" event={"ID":"167d35d1-8eb3-492e-beb3-4325d183c7b9","Type":"ContainerStarted","Data":"4656e16dd200a52cd4287a0041a0ecd9d7cc9e5da476d1a6dc653434eb2374c0"} Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.004254 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" event={"ID":"3369528a-f39f-4e47-92e9-abbca4395b98","Type":"ContainerStarted","Data":"f2362674e7ba9b7755e53b6175bbec070f4563db14c55f08bb8b3e5d6b890b58"} Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.427484 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:32 crc 
kubenswrapper[4689]: E0123 11:08:32.427862 4689 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:32 crc kubenswrapper[4689]: E0123 11:08:32.427907 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert podName:99f43d3e-dce3-4f53-90a5-76793663baaf nodeName:}" failed. No retries permitted until 2026-01-23 11:08:36.427892579 +0000 UTC m=+1181.052572438 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" (UID: "99f43d3e-dce3-4f53-90a5-76793663baaf") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.731911 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:32 crc kubenswrapper[4689]: I0123 11:08:32.731976 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:32 crc kubenswrapper[4689]: E0123 11:08:32.732113 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:32 crc kubenswrapper[4689]: E0123 11:08:32.732195 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:32 crc kubenswrapper[4689]: E0123 11:08:32.732228 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:36.732207056 +0000 UTC m=+1181.356886915 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:32 crc kubenswrapper[4689]: E0123 11:08:32.732269 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:36.732249537 +0000 UTC m=+1181.356929466 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:35 crc kubenswrapper[4689]: I0123 11:08:35.797046 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:35 crc kubenswrapper[4689]: E0123 11:08:35.797341 4689 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:35 crc kubenswrapper[4689]: E0123 11:08:35.797582 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert podName:8359ad74-2a40-4f5f-afe6-880a3f0a990e nodeName:}" failed. No retries permitted until 2026-01-23 11:08:43.797564987 +0000 UTC m=+1188.422244846 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert") pod "infra-operator-controller-manager-54ccf4f85d-l5n7d" (UID: "8359ad74-2a40-4f5f-afe6-880a3f0a990e") : secret "infra-operator-webhook-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: I0123 11:08:36.507329 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.507661 4689 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.507724 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert podName:99f43d3e-dce3-4f53-90a5-76793663baaf nodeName:}" failed. No retries permitted until 2026-01-23 11:08:44.507706863 +0000 UTC m=+1189.132386722 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" (UID: "99f43d3e-dce3-4f53-90a5-76793663baaf") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: I0123 11:08:36.810842 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:36 crc kubenswrapper[4689]: I0123 11:08:36.810903 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.811245 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.811337 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:44.811315753 +0000 UTC m=+1189.435995612 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.812053 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:36 crc kubenswrapper[4689]: E0123 11:08:36.812128 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:08:44.812111933 +0000 UTC m=+1189.436791792 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:43 crc kubenswrapper[4689]: I0123 11:08:43.851703 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.564905 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.569236 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f43d3e-dce3-4f53-90a5-76793663baaf-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8545zczg\" (UID: \"99f43d3e-dce3-4f53-90a5-76793663baaf\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.733335 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.869836 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.869899 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:08:44 crc kubenswrapper[4689]: E0123 11:08:44.870063 4689 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 11:08:44 crc kubenswrapper[4689]: E0123 11:08:44.870171 4689 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 11:08:44 crc kubenswrapper[4689]: E0123 11:08:44.870199 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:09:00.870174219 +0000 UTC m=+1205.494854148 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "metrics-server-cert" not found Jan 23 11:08:44 crc kubenswrapper[4689]: E0123 11:08:44.870248 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs podName:4922b965-fa40-47b5-b388-e63767b62a97 nodeName:}" failed. No retries permitted until 2026-01-23 11:09:00.870230651 +0000 UTC m=+1205.494910510 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs") pod "openstack-operator-controller-manager-7c8c5b48f6-4bw4b" (UID: "4922b965-fa40-47b5-b388-e63767b62a97") : secret "webhook-server-cert" not found Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.872692 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8359ad74-2a40-4f5f-afe6-880a3f0a990e-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-l5n7d\" (UID: \"8359ad74-2a40-4f5f-afe6-880a3f0a990e\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:44 crc kubenswrapper[4689]: I0123 11:08:44.993224 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:08:51 crc kubenswrapper[4689]: E0123 11:08:51.742937 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831" Jan 23 11:08:51 crc kubenswrapper[4689]: E0123 11:08:51.743677 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nc8dg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6b8bc8d87d-2lwwn_openstack-operators(2bd7a193-5394-452e-9315-0332e4a4e667): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:51 crc kubenswrapper[4689]: E0123 11:08:51.745066 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podUID="2bd7a193-5394-452e-9315-0332e4a4e667" Jan 23 11:08:52 crc kubenswrapper[4689]: E0123 11:08:52.222544 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podUID="2bd7a193-5394-452e-9315-0332e4a4e667" Jan 23 11:08:52 crc kubenswrapper[4689]: E0123 11:08:52.323487 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 23 11:08:52 crc kubenswrapper[4689]: E0123 11:08:52.323654 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zjrd7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-t97lk_openstack-operators(72fb2e87-da8d-4db1-b255-d38d7c15b5cd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:52 crc kubenswrapper[4689]: E0123 11:08:52.324877 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" Jan 23 11:08:53 crc kubenswrapper[4689]: E0123 11:08:53.232390 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" Jan 23 11:08:55 crc kubenswrapper[4689]: E0123 11:08:55.632635 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4" Jan 23 11:08:55 crc kubenswrapper[4689]: E0123 11:08:55.633222 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mcv79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5d8f59fb49-rt7xn_openstack-operators(a9f05c03-72c2-4906-b327-df50d5922d28): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:55 crc kubenswrapper[4689]: E0123 11:08:55.634318 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.260654 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.327979 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.328180 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbqls,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-5rkch_openstack-operators(d36ac685-507d-4cfa-b6fe-7f595536c32f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.329735 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.834169 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.834329 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vltff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-78c6999f6f-szbq7_openstack-operators(28a286e0-4072-40b0-aa95-4a12299f5a72): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:56 crc kubenswrapper[4689]: E0123 11:08:56.836322 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" Jan 23 11:08:57 crc kubenswrapper[4689]: E0123 11:08:57.270462 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" Jan 23 11:08:57 crc kubenswrapper[4689]: E0123 11:08:57.270492 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" 
podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" Jan 23 11:08:58 crc kubenswrapper[4689]: E0123 11:08:58.913247 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b" Jan 23 11:08:58 crc kubenswrapper[4689]: E0123 11:08:58.913474 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pxmwn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-j79ts_openstack-operators(f451d39d-2f3f-4c53-b5a2-d8e7f74247f9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:08:58 crc kubenswrapper[4689]: E0123 11:08:58.914710 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podUID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" Jan 23 11:08:59 crc kubenswrapper[4689]: E0123 11:08:59.284512 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podUID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" Jan 23 11:09:00 crc kubenswrapper[4689]: I0123 11:09:00.889086 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:00 crc kubenswrapper[4689]: I0123 11:09:00.889523 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:00 crc kubenswrapper[4689]: I0123 11:09:00.896038 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-webhook-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:00 crc kubenswrapper[4689]: I0123 11:09:00.899349 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4922b965-fa40-47b5-b388-e63767b62a97-metrics-certs\") pod \"openstack-operator-controller-manager-7c8c5b48f6-4bw4b\" (UID: \"4922b965-fa40-47b5-b388-e63767b62a97\") " pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:01 crc kubenswrapper[4689]: I0123 11:09:01.069514 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-d6kdw" Jan 23 11:09:01 crc kubenswrapper[4689]: I0123 11:09:01.077816 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:01 crc kubenswrapper[4689]: I0123 11:09:01.761315 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:09:01 crc kubenswrapper[4689]: I0123 11:09:01.778506 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 23 11:09:03 crc kubenswrapper[4689]: E0123 11:09:03.680410 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337" Jan 23 11:09:03 crc kubenswrapper[4689]: E0123 11:09:03.680865 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r72gx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
glance-operator-controller-manager-78fdd796fd-dl6g9_openstack-operators(39db2be1-cb37-4ca9-af8a-5ce0f2d1db16): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:03 crc kubenswrapper[4689]: E0123 11:09:03.682088 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" Jan 23 11:09:04 crc kubenswrapper[4689]: E0123 11:09:04.327961 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337\\\"\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" Jan 23 11:09:04 crc kubenswrapper[4689]: E0123 11:09:04.616745 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f" Jan 23 11:09:04 crc kubenswrapper[4689]: E0123 11:09:04.617142 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hdjqp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-69cf5d4557-q2clt_openstack-operators(d587cb55-dfd2-42e6-bb32-3a4202dd05c5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:04 crc kubenswrapper[4689]: E0123 11:09:04.618521 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" Jan 23 11:09:05 crc kubenswrapper[4689]: E0123 11:09:05.178509 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d" Jan 23 11:09:05 crc kubenswrapper[4689]: E0123 11:09:05.178949 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v57sg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-rszc5_openstack-operators(7ad0b754-e721-4b19-b0b6-a7e1200a48d4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:05 crc kubenswrapper[4689]: E0123 11:09:05.180652 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podUID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" Jan 23 11:09:05 crc kubenswrapper[4689]: E0123 11:09:05.334182 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podUID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" Jan 23 11:09:05 crc kubenswrapper[4689]: E0123 11:09:05.334491 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.042100 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.042979 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j2dlz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-59dd8b7cbf-86c6v_openstack-operators(c9dc7063-1b29-40e1-b451-e9dc882e7476): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.044185 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.372912 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.769535 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.769767 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wmtwc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-77d5c5b54f-cgxb7_openstack-operators(b5e62e31-60a7-4964-b3e7-611e7a8bfa81): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:10 crc kubenswrapper[4689]: E0123 11:09:10.771364 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podUID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" Jan 23 11:09:11 crc kubenswrapper[4689]: E0123 11:09:11.377118 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podUID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" Jan 23 11:09:11 crc kubenswrapper[4689]: E0123 11:09:11.870642 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0" Jan 23 11:09:11 crc kubenswrapper[4689]: E0123 11:09:11.870802 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-28f2m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5d646b7d76-kklnd_openstack-operators(3369528a-f39f-4e47-92e9-abbca4395b98): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:11 crc kubenswrapper[4689]: E0123 11:09:11.872011 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" Jan 23 11:09:12 crc kubenswrapper[4689]: E0123 11:09:12.350054 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 23 11:09:12 crc kubenswrapper[4689]: E0123 11:09:12.350280 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sqj6n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-b2s75_openstack-operators(b1a77706-f3e5-48b9-95b8-5f13daa0d29f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:12 crc kubenswrapper[4689]: E0123 11:09:12.351569 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" podUID="b1a77706-f3e5-48b9-95b8-5f13daa0d29f" Jan 23 11:09:12 crc kubenswrapper[4689]: E0123 11:09:12.554514 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" podUID="b1a77706-f3e5-48b9-95b8-5f13daa0d29f" Jan 23 11:09:12 crc kubenswrapper[4689]: E0123 11:09:12.555499 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.064953 4689 log.go:32] "PullImage from image service 
failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.065124 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zq2rk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7bd9774b6-m2sb7_openstack-operators(167d35d1-8eb3-492e-beb3-4325d183c7b9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.066359 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.240984 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.98:5001/openstack-k8s-operators/telemetry-operator:98802705a6009906bd674da304de45e781658a53" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.241472 
4689 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.98:5001/openstack-k8s-operators/telemetry-operator:98802705a6009906bd674da304de45e781658a53" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.241695 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.98:5001/openstack-k8s-operators/telemetry-operator:98802705a6009906bd674da304de45e781658a53,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5hc4c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-94c58dc69-75pd4_openstack-operators(6e48e594-66b4-4d88-823f-2ed90fa79d66): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.242901 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.399990 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5\\\"\"" 
pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.402998 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.98:5001/openstack-k8s-operators/telemetry-operator:98802705a6009906bd674da304de45e781658a53\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.812077 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 11:09:13.812900 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jwmhs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-b8s9h_openstack-operators(d55b5d87-6f4b-4eb7-bfc7-025b936cebb9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:09:13 crc kubenswrapper[4689]: E0123 
11:09:13.819522 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.428232 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b"] Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.428964 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" event={"ID":"54403d19-67da-4783-8b45-b7070bc15424","Type":"ContainerStarted","Data":"4c9d3b565f75fab4b3494dcdff98af5885c61dcab588c19ad536c3b08a941da3"} Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.429083 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.453037 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" event={"ID":"1f6a7f15-609b-414e-8119-366afe98811f","Type":"ContainerStarted","Data":"6ef91b3993663b9fbf14d57c88607e3ef317bce08b95696400ada06c891ce577"} Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.453566 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.458574 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podStartSLOduration=4.016468398 podStartE2EDuration="47.458529204s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:29.783952116 +0000 UTC m=+1174.408631975" lastFinishedPulling="2026-01-23 11:09:13.226012922 +0000 UTC m=+1217.850692781" observedRunningTime="2026-01-23 11:09:14.453323644 +0000 UTC m=+1219.078003503" watchObservedRunningTime="2026-01-23 11:09:14.458529204 +0000 UTC m=+1219.083209063" Jan 23 11:09:14 crc kubenswrapper[4689]: W0123 11:09:14.459270 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4922b965_fa40_47b5_b388_e63767b62a97.slice/crio-a4035186b956f4f5e88d865915f3f337aaf1a64fc2eecfbf1c9134a62eb7556e WatchSource:0}: Error finding container a4035186b956f4f5e88d865915f3f337aaf1a64fc2eecfbf1c9134a62eb7556e: Status 404 returned error can't find the container with id a4035186b956f4f5e88d865915f3f337aaf1a64fc2eecfbf1c9134a62eb7556e Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.460518 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" event={"ID":"3d3561eb-7369-4466-b9ee-037e02b2c219","Type":"ContainerStarted","Data":"02fcdba4ae0517ab65a02ae2a217999286dcdecc9b70247c81169a00a58db698"} Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.460743 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.483413 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" event={"ID":"28a286e0-4072-40b0-aa95-4a12299f5a72","Type":"ContainerStarted","Data":"ae66ba54a8d8aa8d294822d67c78a0cfeeae3a8339ea4505ec06bff6858dc64d"} Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.484446 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.509534 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" podStartSLOduration=3.136783979 podStartE2EDuration="47.50950608s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:28.853274731 +0000 UTC m=+1173.477954590" lastFinishedPulling="2026-01-23 11:09:13.225996832 +0000 UTC m=+1217.850676691" observedRunningTime="2026-01-23 11:09:14.497689254 +0000 UTC m=+1219.122369113" watchObservedRunningTime="2026-01-23 11:09:14.50950608 +0000 UTC m=+1219.134185939" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.511132 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" event={"ID":"a9f05c03-72c2-4906-b327-df50d5922d28","Type":"ContainerStarted","Data":"88f55ee37f1a238a42ca14ce9589a8e9bf6e8c2e9ce65987b81dc450d922e7be"} Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.512708 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:09:14 crc kubenswrapper[4689]: E0123 11:09:14.514052 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.552866 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podStartSLOduration=4.082156603 podStartE2EDuration="47.552842615s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.387248198 +0000 UTC m=+1175.011928057" lastFinishedPulling="2026-01-23 11:09:13.85793421 +0000 UTC m=+1218.482614069" observedRunningTime="2026-01-23 11:09:14.526079185 +0000 UTC m=+1219.150759054" watchObservedRunningTime="2026-01-23 11:09:14.552842615 +0000 UTC m=+1219.177522474" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.558722 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podStartSLOduration=3.516324399 podStartE2EDuration="47.558705222s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:29.184232705 +0000 UTC m=+1173.808912564" lastFinishedPulling="2026-01-23 11:09:13.226613528 +0000 UTC m=+1217.851293387" observedRunningTime="2026-01-23 11:09:14.544560258 +0000 UTC m=+1219.169240117" watchObservedRunningTime="2026-01-23 11:09:14.558705222 +0000 UTC m=+1219.183385081" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.623361 4689 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podStartSLOduration=4.641373461 podStartE2EDuration="47.62334184s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.885968712 +0000 UTC m=+1175.510648571" lastFinishedPulling="2026-01-23 11:09:13.867937081 +0000 UTC m=+1218.492616950" observedRunningTime="2026-01-23 11:09:14.60574483 +0000 UTC m=+1219.230424689" watchObservedRunningTime="2026-01-23 11:09:14.62334184 +0000 UTC m=+1219.248021699" Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.757325 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d"] Jan 23 11:09:14 crc kubenswrapper[4689]: W0123 11:09:14.763378 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8359ad74_2a40_4f5f_afe6_880a3f0a990e.slice/crio-b492a8293ee5bc025b030ec52e93732c4ccde2c56b7e9a017d24624a4a1007c6 WatchSource:0}: Error finding container b492a8293ee5bc025b030ec52e93732c4ccde2c56b7e9a017d24624a4a1007c6: Status 404 returned error can't find the container with id b492a8293ee5bc025b030ec52e93732c4ccde2c56b7e9a017d24624a4a1007c6 Jan 23 11:09:14 crc kubenswrapper[4689]: W0123 11:09:14.795503 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99f43d3e_dce3_4f53_90a5_76793663baaf.slice/crio-22b932928f775d5021dc475d7c68c95841ebf0b625c6cefc3fd90ef967049c91 WatchSource:0}: Error finding container 22b932928f775d5021dc475d7c68c95841ebf0b625c6cefc3fd90ef967049c91: Status 404 returned error can't find the container with id 22b932928f775d5021dc475d7c68c95841ebf0b625c6cefc3fd90ef967049c91 Jan 23 11:09:14 crc kubenswrapper[4689]: I0123 11:09:14.799020 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg"] Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.527202 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" event={"ID":"8359ad74-2a40-4f5f-afe6-880a3f0a990e","Type":"ContainerStarted","Data":"b492a8293ee5bc025b030ec52e93732c4ccde2c56b7e9a017d24624a4a1007c6"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.530625 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" event={"ID":"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9","Type":"ContainerStarted","Data":"06d5b593341eacea193741a2238185d9478479717081e0fb3893a5364a2ea84b"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.530873 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.532359 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" event={"ID":"72fb2e87-da8d-4db1-b255-d38d7c15b5cd","Type":"ContainerStarted","Data":"373dd3741255b656c7658abc5192c58adfe9fef3da3d9e2d7aa69ce601c83b26"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.532573 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 
11:09:15.533980 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" event={"ID":"5f4d15d8-f941-4082-ab51-3ecda5527f9b","Type":"ContainerStarted","Data":"7639c1a58c928d80666ad2d99cd99080343a84f5acc53dc271feb3745392ffd4"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.534110 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.535570 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" event={"ID":"2bd7a193-5394-452e-9315-0332e4a4e667","Type":"ContainerStarted","Data":"5a4c7dcb3b69d6fe536600b96c035f8b9e4dccea8f5d05702d0965037a3c48e7"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.535755 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.537033 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" event={"ID":"d36ac685-507d-4cfa-b6fe-7f595536c32f","Type":"ContainerStarted","Data":"a50c724c621aeb491390ac358c12ca1c2f848851aff767863477263db4e81fb6"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.537365 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.538466 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" event={"ID":"99f43d3e-dce3-4f53-90a5-76793663baaf","Type":"ContainerStarted","Data":"22b932928f775d5021dc475d7c68c95841ebf0b625c6cefc3fd90ef967049c91"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.544436 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" event={"ID":"4922b965-fa40-47b5-b388-e63767b62a97","Type":"ContainerStarted","Data":"49aa2056ed8d32dc8f5f9a4763e0c4b749d6c60c8ad22236def4dfb0b79215a2"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.544490 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" event={"ID":"4922b965-fa40-47b5-b388-e63767b62a97","Type":"ContainerStarted","Data":"a4035186b956f4f5e88d865915f3f337aaf1a64fc2eecfbf1c9134a62eb7556e"} Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.554280 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podStartSLOduration=4.780895046 podStartE2EDuration="47.554261172s" podCreationTimestamp="2026-01-23 11:08:28 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.560388954 +0000 UTC m=+1176.185068813" lastFinishedPulling="2026-01-23 11:09:14.33375508 +0000 UTC m=+1218.958434939" observedRunningTime="2026-01-23 11:09:15.551556834 +0000 UTC m=+1220.176236693" watchObservedRunningTime="2026-01-23 11:09:15.554261172 +0000 UTC m=+1220.178941031" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.571603 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" 
podStartSLOduration=5.549781919 podStartE2EDuration="48.571585575s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.845527809 +0000 UTC m=+1175.470207658" lastFinishedPulling="2026-01-23 11:09:13.867331455 +0000 UTC m=+1218.492011314" observedRunningTime="2026-01-23 11:09:15.566913009 +0000 UTC m=+1220.191592878" watchObservedRunningTime="2026-01-23 11:09:15.571585575 +0000 UTC m=+1220.196265434" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.590929 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podStartSLOduration=5.06604806 podStartE2EDuration="48.590908719s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.341524963 +0000 UTC m=+1174.966204822" lastFinishedPulling="2026-01-23 11:09:13.866385612 +0000 UTC m=+1218.491065481" observedRunningTime="2026-01-23 11:09:15.583591816 +0000 UTC m=+1220.208271675" watchObservedRunningTime="2026-01-23 11:09:15.590908719 +0000 UTC m=+1220.215588578" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.634248 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podStartSLOduration=47.634227944 podStartE2EDuration="47.634227944s" podCreationTimestamp="2026-01-23 11:08:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:09:15.627629718 +0000 UTC m=+1220.252309597" watchObservedRunningTime="2026-01-23 11:09:15.634227944 +0000 UTC m=+1220.258907803" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.666227 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podStartSLOduration=5.815635264 podStartE2EDuration="48.666208974s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.019990897 +0000 UTC m=+1175.644670756" lastFinishedPulling="2026-01-23 11:09:13.870564597 +0000 UTC m=+1218.495244466" observedRunningTime="2026-01-23 11:09:15.658582484 +0000 UTC m=+1220.283262343" watchObservedRunningTime="2026-01-23 11:09:15.666208974 +0000 UTC m=+1220.290888823" Jan 23 11:09:15 crc kubenswrapper[4689]: I0123 11:09:15.706026 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" podStartSLOduration=5.735697713 podStartE2EDuration="48.70599916s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.255728566 +0000 UTC m=+1174.880408425" lastFinishedPulling="2026-01-23 11:09:13.226029973 +0000 UTC m=+1217.850709872" observedRunningTime="2026-01-23 11:09:15.674975204 +0000 UTC m=+1220.299655073" watchObservedRunningTime="2026-01-23 11:09:15.70599916 +0000 UTC m=+1220.330679019" Jan 23 11:09:16 crc kubenswrapper[4689]: I0123 11:09:16.552677 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.425336 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.577680 4689 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" event={"ID":"d587cb55-dfd2-42e6-bb32-3a4202dd05c5","Type":"ContainerStarted","Data":"d817518e10ded0a20c77dbb519c27df287aa0f4b77038b9fc486142ea60aab2d"} Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.577937 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.579697 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" event={"ID":"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16","Type":"ContainerStarted","Data":"065b1a67e2dc6e27c49740632892692de3ce0e27cea59be1df19c3444e332806"} Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.580190 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.580840 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" event={"ID":"7ad0b754-e721-4b19-b0b6-a7e1200a48d4","Type":"ContainerStarted","Data":"79c19042317d1fe97b74a17bab64b7bf7a976590c8d5ae7134d35b51c560cf55"} Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.581323 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.603596 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podStartSLOduration=2.329103542 podStartE2EDuration="52.603574693s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:29.080409446 +0000 UTC m=+1173.705089305" lastFinishedPulling="2026-01-23 11:09:19.354880597 +0000 UTC m=+1223.979560456" observedRunningTime="2026-01-23 11:09:19.598612339 +0000 UTC m=+1224.223292198" watchObservedRunningTime="2026-01-23 11:09:19.603574693 +0000 UTC m=+1224.228254552" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.650414 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podStartSLOduration=3.6677118589999997 podStartE2EDuration="52.650393115s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.23353124 +0000 UTC m=+1174.858211099" lastFinishedPulling="2026-01-23 11:09:19.216212496 +0000 UTC m=+1223.840892355" observedRunningTime="2026-01-23 11:09:19.627546253 +0000 UTC m=+1224.252226132" watchObservedRunningTime="2026-01-23 11:09:19.650393115 +0000 UTC m=+1224.275072974" Jan 23 11:09:19 crc kubenswrapper[4689]: I0123 11:09:19.651268 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podStartSLOduration=3.970713435 podStartE2EDuration="51.651261657s" podCreationTimestamp="2026-01-23 11:08:28 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.602552869 +0000 UTC m=+1176.227232728" lastFinishedPulling="2026-01-23 11:09:19.283101091 +0000 UTC m=+1223.907780950" observedRunningTime="2026-01-23 11:09:19.643303377 +0000 UTC m=+1224.267983246" watchObservedRunningTime="2026-01-23 11:09:19.651261657 +0000 UTC 
m=+1224.275941526" Jan 23 11:09:21 crc kubenswrapper[4689]: I0123 11:09:21.089339 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.635734 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" event={"ID":"99f43d3e-dce3-4f53-90a5-76793663baaf","Type":"ContainerStarted","Data":"58352f3c3146b53b308d350cabbf3a9d19111830b2b8b294e1ce099e2323ea85"} Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.636188 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.640306 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" event={"ID":"8359ad74-2a40-4f5f-afe6-880a3f0a990e","Type":"ContainerStarted","Data":"939f3a2aac68fd7213a2bb984c9b0345accceb9fbf27f1040d0d8f58c0057bcc"} Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.640773 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.675726 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podStartSLOduration=48.29412185 podStartE2EDuration="55.675709153s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:09:14.810349971 +0000 UTC m=+1219.435029830" lastFinishedPulling="2026-01-23 11:09:22.191937264 +0000 UTC m=+1226.816617133" observedRunningTime="2026-01-23 11:09:22.670943994 +0000 UTC m=+1227.295623863" watchObservedRunningTime="2026-01-23 11:09:22.675709153 +0000 UTC m=+1227.300389012" Jan 23 11:09:22 crc kubenswrapper[4689]: I0123 11:09:22.695357 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podStartSLOduration=48.272232492 podStartE2EDuration="55.695332844s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:09:14.766293008 +0000 UTC m=+1219.390972867" lastFinishedPulling="2026-01-23 11:09:22.18939336 +0000 UTC m=+1226.814073219" observedRunningTime="2026-01-23 11:09:22.684993076 +0000 UTC m=+1227.309672935" watchObservedRunningTime="2026-01-23 11:09:22.695332844 +0000 UTC m=+1227.320012703" Jan 23 11:09:23 crc kubenswrapper[4689]: I0123 11:09:23.652648 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" event={"ID":"c9dc7063-1b29-40e1-b451-e9dc882e7476","Type":"ContainerStarted","Data":"a00e670a2fc7896199c14ae596ab719780e9a042a4b14427aa186d8f5865f7fa"} Jan 23 11:09:23 crc kubenswrapper[4689]: I0123 11:09:23.653201 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:09:23 crc kubenswrapper[4689]: I0123 11:09:23.688948 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podStartSLOduration=3.037575935 podStartE2EDuration="56.688931406s" 
podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:29.411951875 +0000 UTC m=+1174.036631734" lastFinishedPulling="2026-01-23 11:09:23.063307326 +0000 UTC m=+1227.687987205" observedRunningTime="2026-01-23 11:09:23.685239093 +0000 UTC m=+1228.309918972" watchObservedRunningTime="2026-01-23 11:09:23.688931406 +0000 UTC m=+1228.313611265" Jan 23 11:09:24 crc kubenswrapper[4689]: I0123 11:09:24.664980 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" event={"ID":"b5e62e31-60a7-4964-b3e7-611e7a8bfa81","Type":"ContainerStarted","Data":"bfa358ceac7caa6a34be83da1dd2aa0c4e98c78661c0e6d4351376051e9bc705"} Jan 23 11:09:24 crc kubenswrapper[4689]: I0123 11:09:24.665474 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:09:24 crc kubenswrapper[4689]: I0123 11:09:24.733384 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podStartSLOduration=4.477263993 podStartE2EDuration="57.73336237s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:30.88589118 +0000 UTC m=+1175.510571039" lastFinishedPulling="2026-01-23 11:09:24.141989537 +0000 UTC m=+1228.766669416" observedRunningTime="2026-01-23 11:09:24.723880403 +0000 UTC m=+1229.348560262" watchObservedRunningTime="2026-01-23 11:09:24.73336237 +0000 UTC m=+1229.358042229" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.689847 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" event={"ID":"3369528a-f39f-4e47-92e9-abbca4395b98","Type":"ContainerStarted","Data":"da6f8c5470abbd67ebf7fded1b590e1d6e3441a90e12b60b5ea7f7ab81f06250"} Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.690567 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.693080 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" event={"ID":"6e48e594-66b4-4d88-823f-2ed90fa79d66","Type":"ContainerStarted","Data":"b508331b42f4214dc72bafcb3639996c2d4a5c8ee125e13d01a042b880f81f7b"} Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.693917 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.696999 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" event={"ID":"167d35d1-8eb3-492e-beb3-4325d183c7b9","Type":"ContainerStarted","Data":"2c5d14c17f4d9b1342dc30203394b0cd59e21d00892248c3f95710febffc7829"} Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.697175 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.714004 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podStartSLOduration=4.657167856 
podStartE2EDuration="58.713984996s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.112776259 +0000 UTC m=+1175.737456118" lastFinishedPulling="2026-01-23 11:09:25.169593389 +0000 UTC m=+1229.794273258" observedRunningTime="2026-01-23 11:09:25.710963501 +0000 UTC m=+1230.335643390" watchObservedRunningTime="2026-01-23 11:09:25.713984996 +0000 UTC m=+1230.338664845" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.732167 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podStartSLOduration=4.6615292870000005 podStartE2EDuration="57.732014208s" podCreationTimestamp="2026-01-23 11:08:28 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.641619417 +0000 UTC m=+1176.266299276" lastFinishedPulling="2026-01-23 11:09:24.712104338 +0000 UTC m=+1229.336784197" observedRunningTime="2026-01-23 11:09:25.731024863 +0000 UTC m=+1230.355704722" watchObservedRunningTime="2026-01-23 11:09:25.732014208 +0000 UTC m=+1230.356694067" Jan 23 11:09:25 crc kubenswrapper[4689]: I0123 11:09:25.749030 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podStartSLOduration=5.257438522 podStartE2EDuration="58.749012383s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.706365688 +0000 UTC m=+1176.331045547" lastFinishedPulling="2026-01-23 11:09:25.197939549 +0000 UTC m=+1229.822619408" observedRunningTime="2026-01-23 11:09:25.744265555 +0000 UTC m=+1230.368945414" watchObservedRunningTime="2026-01-23 11:09:25.749012383 +0000 UTC m=+1230.373692242" Jan 23 11:09:27 crc kubenswrapper[4689]: I0123 11:09:27.717488 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" event={"ID":"b1a77706-f3e5-48b9-95b8-5f13daa0d29f","Type":"ContainerStarted","Data":"df50f5f45c52f9d981f8f487a59fd2126408503d8b7b322713d9bee13c623344"} Jan 23 11:09:27 crc kubenswrapper[4689]: I0123 11:09:27.740322 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" podStartSLOduration=4.597807484 podStartE2EDuration="59.740296201s" podCreationTimestamp="2026-01-23 11:08:28 +0000 UTC" firstStartedPulling="2026-01-23 11:08:31.706363918 +0000 UTC m=+1176.331043777" lastFinishedPulling="2026-01-23 11:09:26.848852625 +0000 UTC m=+1231.473532494" observedRunningTime="2026-01-23 11:09:27.736638331 +0000 UTC m=+1232.361318240" watchObservedRunningTime="2026-01-23 11:09:27.740296201 +0000 UTC m=+1232.364976060" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.008486 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.058478 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.165631 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.249341 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.327041 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.328387 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.444979 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.529947 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.617790 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.744442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" event={"ID":"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9","Type":"ContainerStarted","Data":"8770209dbeff0684463277c6bc80ef4f207626e0798fda64fcead3dad54d7f4a"} Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.745547 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.755622 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.767655 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podStartSLOduration=3.385708005 podStartE2EDuration="1m1.767630769s" podCreationTimestamp="2026-01-23 11:08:27 +0000 UTC" firstStartedPulling="2026-01-23 11:08:29.771755462 +0000 UTC m=+1174.396435321" lastFinishedPulling="2026-01-23 11:09:28.153678236 +0000 UTC m=+1232.778358085" observedRunningTime="2026-01-23 11:09:28.761137228 +0000 UTC m=+1233.385817097" watchObservedRunningTime="2026-01-23 11:09:28.767630769 +0000 UTC m=+1233.392310628" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.853732 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 11:09:28 crc kubenswrapper[4689]: I0123 11:09:28.934746 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 11:09:29 crc kubenswrapper[4689]: I0123 11:09:29.332786 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 11:09:29 crc kubenswrapper[4689]: I0123 11:09:29.408618 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 11:09:33 crc kubenswrapper[4689]: I0123 11:09:33.310876 4689 patch_prober.go:28] interesting 
pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:09:33 crc kubenswrapper[4689]: I0123 11:09:33.311497 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:09:34 crc kubenswrapper[4689]: I0123 11:09:34.739669 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 11:09:35 crc kubenswrapper[4689]: I0123 11:09:35.001171 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 11:09:38 crc kubenswrapper[4689]: I0123 11:09:38.310605 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 11:09:38 crc kubenswrapper[4689]: I0123 11:09:38.405426 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 11:09:38 crc kubenswrapper[4689]: I0123 11:09:38.890439 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 11:09:39 crc kubenswrapper[4689]: I0123 11:09:39.030233 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.533630 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.567958 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.571950 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.572120 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.574515 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-7bx44" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.575865 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.580970 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.595025 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.596975 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.604170 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.611462 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.654295 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctcwv\" (UniqueName: \"kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.654390 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpgvh\" (UniqueName: \"kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.654415 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.654441 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.654494 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.755935 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpgvh\" (UniqueName: \"kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.755982 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.756017 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 
11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.756082 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.756124 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctcwv\" (UniqueName: \"kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.757313 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.757334 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.757493 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.779954 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctcwv\" (UniqueName: \"kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv\") pod \"dnsmasq-dns-675f4bcbfc-52rg9\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.780111 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpgvh\" (UniqueName: \"kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh\") pod \"dnsmasq-dns-78dd6ddcc-bsdh6\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.893598 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:09:56 crc kubenswrapper[4689]: I0123 11:09:56.922412 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:09:57 crc kubenswrapper[4689]: I0123 11:09:57.436710 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:09:57 crc kubenswrapper[4689]: W0123 11:09:57.443288 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d87a360_d751_4cb5_9b20_b547d590b258.slice/crio-e3301eb381818d83d3305e1fd1fa65a9e03ef6cfce203e62988ce5e87558fc34 WatchSource:0}: Error finding container e3301eb381818d83d3305e1fd1fa65a9e03ef6cfce203e62988ce5e87558fc34: Status 404 returned error can't find the container with id e3301eb381818d83d3305e1fd1fa65a9e03ef6cfce203e62988ce5e87558fc34 Jan 23 11:09:57 crc kubenswrapper[4689]: I0123 11:09:57.444223 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:09:57 crc kubenswrapper[4689]: I0123 11:09:57.575312 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" event={"ID":"cea97349-f6bc-4cf6-af0f-5c1781edea31","Type":"ContainerStarted","Data":"9d3455c528c0b27d56246924d981ac30a4e847f401026b837b5573a92ddfbcd0"} Jan 23 11:09:57 crc kubenswrapper[4689]: I0123 11:09:57.576407 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" event={"ID":"1d87a360-d751-4cb5-9b20-b547d590b258","Type":"ContainerStarted","Data":"e3301eb381818d83d3305e1fd1fa65a9e03ef6cfce203e62988ce5e87558fc34"} Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.391481 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.425548 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.427001 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.442999 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.526842 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsskj\" (UniqueName: \"kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.526900 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.527112 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.630582 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.630815 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsskj\" (UniqueName: \"kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.630872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.636357 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.637286 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.694120 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsskj\" (UniqueName: 
\"kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj\") pod \"dnsmasq-dns-666b6646f7-hfcbh\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") " pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.756113 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.767878 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.837980 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.840006 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.878757 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"] Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.946325 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.946414 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssmz4\" (UniqueName: \"kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:09:59 crc kubenswrapper[4689]: I0123 11:09:59.946535 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.059635 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.059981 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssmz4\" (UniqueName: \"kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.060059 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.061766 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.064991 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.095736 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssmz4\" (UniqueName: \"kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4\") pod \"dnsmasq-dns-57d769cc4f-mrdnk\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.316528 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.487681 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:10:00 crc kubenswrapper[4689]: W0123 11:10:00.488751 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod735b548b_8f9c_4bbe_9dc0_9bed36c7cb7f.slice/crio-c225f07bb7713df8586efb5cef079736e3fa30485dfd9f71c5d39141023748a7 WatchSource:0}: Error finding container c225f07bb7713df8586efb5cef079736e3fa30485dfd9f71c5d39141023748a7: Status 404 returned error can't find the container with id c225f07bb7713df8586efb5cef079736e3fa30485dfd9f71c5d39141023748a7 Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.566815 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.570598 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.573759 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.573900 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.574280 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.574418 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xkfbv" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.574577 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.574763 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.574862 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.589366 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.615427 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" event={"ID":"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f","Type":"ContainerStarted","Data":"c225f07bb7713df8586efb5cef079736e3fa30485dfd9f71c5d39141023748a7"} Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.622417 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.624681 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.645584 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.648710 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.671689 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682305 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682374 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682414 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682432 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxnd9\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682457 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682480 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682503 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682533 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682552 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682627 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.682654 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.709843 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787048 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787106 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787136 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787263 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787291 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787321 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787387 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787421 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787476 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787518 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787566 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787599 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787628 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787667 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd68k\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787693 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787719 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787745 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787816 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787851 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787881 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787908 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.787936 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788014 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788043 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxnd9\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788069 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788093 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788122 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.788159 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mg77\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.789252 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.789606 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.789664 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.789774 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.789941 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.790226 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " 
pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.790304 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.790333 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.790470 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.793409 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.796655 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.796694 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e3bfba15c2620bda99933c0d1fcd02c9f7555e6a83c90ae95be6723cb9e7b56d/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.797706 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.799415 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.802978 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.806913 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.810904 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxnd9\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.846207 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894741 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894784 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mg77\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894820 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894857 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894880 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894909 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894931 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " 
pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894955 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.894979 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895013 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895039 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895078 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895112 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895135 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895175 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd68k\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895194 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895210 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895227 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895251 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895273 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895291 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895328 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895732 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.895938 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.896581 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.898483 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc 
kubenswrapper[4689]: I0123 11:10:00.898832 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.899086 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.899453 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.899480 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/501d83a3da2ec14b1d180069419aef4deb1d58e5f74a36ce9437297357b54132/globalmount\"" pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.900271 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.901507 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.902263 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.903638 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"] Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.904171 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.904195 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1a1c51b49e7f304157ff32a4fbdeaf59458f4c680bdd3694589f5d788ea7a57f/globalmount\"" pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: W0123 11:10:00.905656 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc88bc3f8_37ac_4592_85d3_a5e048f48e1d.slice/crio-4d74c3720661ae1410fe2c21af133a80ebf8d59adfb626b70d873d817535e61c WatchSource:0}: Error finding container 4d74c3720661ae1410fe2c21af133a80ebf8d59adfb626b70d873d817535e61c: Status 404 returned error can't find the container with id 4d74c3720661ae1410fe2c21af133a80ebf8d59adfb626b70d873d817535e61c Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.906358 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.908769 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.909646 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.910734 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.915407 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.915482 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mg77\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.915736 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: 
\"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.917037 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.917828 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.921019 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.926508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd68k\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.926565 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.954352 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " pod="openstack/rabbitmq-server-1" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.968915 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " pod="openstack/rabbitmq-server-2" Jan 23 11:10:00 crc kubenswrapper[4689]: I0123 11:10:00.973125 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.000491 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.007228 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.012527 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.012617 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-xbtxv" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.012820 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.012932 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.013129 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.013186 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.013275 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.022765 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100483 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100813 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100847 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100879 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100916 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100938 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100968 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.100995 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.101032 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.101047 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5d2r\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.101090 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.203942 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204033 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204059 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5d2r\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204126 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204263 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204302 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204338 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204384 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204435 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204466 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204678 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.204901 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.206132 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.206704 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.206972 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.209990 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.210630 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.210684 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2978a247f81105abfa16be10d09657c6d2bfe07654d1f81f07d27e81227d5715/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.214012 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.214139 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.214745 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.221054 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.232131 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5d2r\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.250602 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.272422 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.343821 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.552905 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:10:01 crc kubenswrapper[4689]: W0123 11:10:01.562424 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8287b3f6_975a_4082_a086_bd1ee9ec4d7b.slice/crio-4fe1935ab432ef302a11cce2f11c0d4be10aef8b0eac81ab75516a5a9c81e7e8 WatchSource:0}: Error finding container 4fe1935ab432ef302a11cce2f11c0d4be10aef8b0eac81ab75516a5a9c81e7e8: Status 404 returned error can't find the container with id 4fe1935ab432ef302a11cce2f11c0d4be10aef8b0eac81ab75516a5a9c81e7e8 Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.660820 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerStarted","Data":"4fe1935ab432ef302a11cce2f11c0d4be10aef8b0eac81ab75516a5a9c81e7e8"} Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.662473 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" event={"ID":"c88bc3f8-37ac-4592-85d3-a5e048f48e1d","Type":"ContainerStarted","Data":"4d74c3720661ae1410fe2c21af133a80ebf8d59adfb626b70d873d817535e61c"} Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.668233 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.878539 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:10:01 crc kubenswrapper[4689]: I0123 11:10:01.923400 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.243780 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.247603 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.250542 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-c2smf" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.250889 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.251570 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.252657 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.283732 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.313660 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.355706 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-config-data-default\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.355780 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-operator-scripts\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356055 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9926a3b2-8d65-4876-b56b-488948df1352-config-data-generated\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356308 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356340 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356365 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khhgk\" (UniqueName: \"kubernetes.io/projected/9926a3b2-8d65-4876-b56b-488948df1352-kube-api-access-khhgk\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356450 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-kolla-config\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.356574 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.458854 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.458939 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-config-data-default\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.458965 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-operator-scripts\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.459051 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9926a3b2-8d65-4876-b56b-488948df1352-config-data-generated\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.459119 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.459140 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.459173 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khhgk\" (UniqueName: \"kubernetes.io/projected/9926a3b2-8d65-4876-b56b-488948df1352-kube-api-access-khhgk\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.459205 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-kolla-config\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.460300 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-kolla-config\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.464633 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9926a3b2-8d65-4876-b56b-488948df1352-config-data-generated\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.465549 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-config-data-default\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.466866 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9926a3b2-8d65-4876-b56b-488948df1352-operator-scripts\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.469077 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.469115 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6a35cf9d1a966469b6d612188c151f1a1f59d2dcdf4cfa9de82618c08b1befe3/globalmount\"" pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.472029 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.483990 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khhgk\" (UniqueName: \"kubernetes.io/projected/9926a3b2-8d65-4876-b56b-488948df1352-kube-api-access-khhgk\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.487382 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9926a3b2-8d65-4876-b56b-488948df1352-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.535999 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44ae9daf-1264-4d14-90ca-c33e6398d100\") pod \"openstack-galera-0\" (UID: \"9926a3b2-8d65-4876-b56b-488948df1352\") " pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.611852 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 23 11:10:02 crc kubenswrapper[4689]: I0123 11:10:02.672537 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerStarted","Data":"333dd15934864753703a26671b7adafe9452ed8636fcc61fb6aa2465956d0ad5"} Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.310874 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.311090 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.554740 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.556282 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.560744 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.561688 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.561814 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-6f2wz" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.562185 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.587006 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688382 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688474 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgz4c\" (UniqueName: \"kubernetes.io/projected/cab355b0-25b6-4ad4-83ad-718ae756ae29-kube-api-access-dgz4c\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688521 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 
11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688546 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688582 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688597 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688652 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2924047f-63af-49b6-ba9a-028905749b82\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2924047f-63af-49b6-ba9a-028905749b82\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.688709 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.750097 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.751343 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.753041 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.753592 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-nhhgt" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.753924 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.766726 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791603 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791651 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791701 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791728 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791761 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2924047f-63af-49b6-ba9a-028905749b82\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2924047f-63af-49b6-ba9a-028905749b82\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791800 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791873 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.791963 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-dgz4c\" (UniqueName: \"kubernetes.io/projected/cab355b0-25b6-4ad4-83ad-718ae756ae29-kube-api-access-dgz4c\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.793069 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.793501 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.794996 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.796339 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cab355b0-25b6-4ad4-83ad-718ae756ae29-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0" Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.799961 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.800019 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2924047f-63af-49b6-ba9a-028905749b82\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2924047f-63af-49b6-ba9a-028905749b82\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/502874ffb6d7334a52eaffb53fd25e2bfc9ffe8aad7465370dfb43cfc90907f7/globalmount\"" pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.817936 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.818686 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cab355b0-25b6-4ad4-83ad-718ae756ae29-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.822129 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgz4c\" (UniqueName: \"kubernetes.io/projected/cab355b0-25b6-4ad4-83ad-718ae756ae29-kube-api-access-dgz4c\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.894375 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.894441 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-config-data\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.894486 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.894618 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dmtn\" (UniqueName: \"kubernetes.io/projected/3b85b39c-5625-416e-9ddd-55845b645716-kube-api-access-6dmtn\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.894655 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-kolla-config\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.899410 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2924047f-63af-49b6-ba9a-028905749b82\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2924047f-63af-49b6-ba9a-028905749b82\") pod \"openstack-cell1-galera-0\" (UID: \"cab355b0-25b6-4ad4-83ad-718ae756ae29\") " pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.996062 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dmtn\" (UniqueName: \"kubernetes.io/projected/3b85b39c-5625-416e-9ddd-55845b645716-kube-api-access-6dmtn\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.996127 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-kolla-config\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.996234 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.996292 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-config-data\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.996341 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.997755 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-kolla-config\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:03 crc kubenswrapper[4689]: I0123 11:10:03.997922 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b85b39c-5625-416e-9ddd-55845b645716-config-data\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:04 crc kubenswrapper[4689]: I0123 11:10:04.021857 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:04 crc kubenswrapper[4689]: I0123 11:10:04.025282 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b85b39c-5625-416e-9ddd-55845b645716-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:04 crc kubenswrapper[4689]: I0123 11:10:04.033942 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dmtn\" (UniqueName: \"kubernetes.io/projected/3b85b39c-5625-416e-9ddd-55845b645716-kube-api-access-6dmtn\") pod \"memcached-0\" (UID: \"3b85b39c-5625-416e-9ddd-55845b645716\") " pod="openstack/memcached-0"
Jan 23 11:10:04 crc kubenswrapper[4689]: I0123 11:10:04.071321 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 23 11:10:04 crc kubenswrapper[4689]: I0123 11:10:04.200749 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 23 11:10:05 crc kubenswrapper[4689]: I0123 11:10:05.837892 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 23 11:10:05 crc kubenswrapper[4689]: I0123 11:10:05.839918 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 23 11:10:05 crc kubenswrapper[4689]: I0123 11:10:05.848089 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4bsf6"
Jan 23 11:10:05 crc kubenswrapper[4689]: I0123 11:10:05.853877 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 23 11:10:05 crc kubenswrapper[4689]: I0123 11:10:05.966644 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf4vt\" (UniqueName: \"kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt\") pod \"kube-state-metrics-0\" (UID: \"30b34da3-4b55-46a6-88c3-4bdfcbde2f66\") " pod="openstack/kube-state-metrics-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.068847 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf4vt\" (UniqueName: \"kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt\") pod \"kube-state-metrics-0\" (UID: \"30b34da3-4b55-46a6-88c3-4bdfcbde2f66\") " pod="openstack/kube-state-metrics-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.101017 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf4vt\" (UniqueName: \"kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt\") pod \"kube-state-metrics-0\" (UID: \"30b34da3-4b55-46a6-88c3-4bdfcbde2f66\") " pod="openstack/kube-state-metrics-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.171679 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.475574 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.483020 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.493694 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.493993 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-5rmj5"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.503688 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.584452 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4zlr\" (UniqueName: \"kubernetes.io/projected/0c864284-1e65-4712-a75f-bad9506f55d6-kube-api-access-c4zlr\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.584582 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.688391 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.688487 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4zlr\" (UniqueName: \"kubernetes.io/projected/0c864284-1e65-4712-a75f-bad9506f55d6-kube-api-access-c4zlr\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: E0123 11:10:06.689370 4689 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found
Jan 23 11:10:06 crc kubenswrapper[4689]: E0123 11:10:06.689418 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert podName:0c864284-1e65-4712-a75f-bad9506f55d6 nodeName:}" failed. No retries permitted until 2026-01-23 11:10:07.189404614 +0000 UTC m=+1271.814084473 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert") pod "observability-ui-dashboards-66cbf594b5-fn992" (UID: "0c864284-1e65-4712-a75f-bad9506f55d6") : secret "observability-ui-dashboards" not found
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.709287 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4zlr\" (UniqueName: \"kubernetes.io/projected/0c864284-1e65-4712-a75f-bad9506f55d6-kube-api-access-c4zlr\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.830455 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5859dc97b8-47f8f"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.831894 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.859538 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5859dc97b8-47f8f"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.911545 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.913618 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916369 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916567 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916688 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916791 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916970 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.916982 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.917109 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.917264 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-78tqt"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.935747 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998311 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49thk\" (UniqueName: \"kubernetes.io/projected/5d2085e7-92df-4502-97e9-66dfbfae189a-kube-api-access-49thk\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998387 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-trusted-ca-bundle\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998428 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-oauth-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998482 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-oauth-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998512 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-service-ca\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998564 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:06 crc kubenswrapper[4689]: I0123 11:10:06.998656 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-console-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100770 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100861 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100903 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100931 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100970 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-console-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.100997 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.101103 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49thk\" (UniqueName: \"kubernetes.io/projected/5d2085e7-92df-4502-97e9-66dfbfae189a-kube-api-access-49thk\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.101667 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-trusted-ca-bundle\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.101751 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-oauth-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.101881 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-console-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102505 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-oauth-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102535 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-trusted-ca-bundle\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.101782 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102594 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102686 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-oauth-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102720 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-service-ca\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102785 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102811 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s98xq\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102864 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.102880 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.106994 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d2085e7-92df-4502-97e9-66dfbfae189a-service-ca\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.107617 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-serving-cert\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.110380 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5d2085e7-92df-4502-97e9-66dfbfae189a-console-oauth-config\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205236 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205306 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205365 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205397 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205460 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205497 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205530 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205627 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205656 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205734 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.205760 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s98xq\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.206417 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.206554 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.206940 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.209704 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.209746 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0e6ddec39173b96d0ed120a8bb31333c778d498bbf2e1919046af9890c5ee84b/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.210102 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.210172 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.211009 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.222684 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c864284-1e65-4712-a75f-bad9506f55d6-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-fn992\" (UID: \"0c864284-1e65-4712-a75f-bad9506f55d6\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.223984 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.228704 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.229041 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s98xq\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.251759 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.400340 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49thk\" (UniqueName: \"kubernetes.io/projected/5d2085e7-92df-4502-97e9-66dfbfae189a-kube-api-access-49thk\") pod \"console-5859dc97b8-47f8f\" (UID: \"5d2085e7-92df-4502-97e9-66dfbfae189a\") " pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.432322 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.457804 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5859dc97b8-47f8f"
Jan 23 11:10:07 crc kubenswrapper[4689]: I0123 11:10:07.537361 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.143110 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.148373 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.151592 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-k5vqq"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.152091 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.152389 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.153706 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.154304 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.155969 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.244799 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.244966 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245031 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245091 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98xcs\" (UniqueName: \"kubernetes.io/projected/8245587e-b385-48bf-a684-2c72fedfb5d6-kube-api-access-98xcs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245262 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245295 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245445 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.245721 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-config\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347560 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-config\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347644 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347692 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347719 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347746 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98xcs\" (UniqueName: \"kubernetes.io/projected/8245587e-b385-48bf-a684-2c72fedfb5d6-kube-api-access-98xcs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347801 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347828 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.347888 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.352229 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-config\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.353368 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.354355 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.355424 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8245587e-b385-48bf-a684-2c72fedfb5d6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.358809 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.367497 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.367566 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e3aea165543cb9ece3055cc8382b697e2c1bd1fe97b69a7ffe9fa096412355a3/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.367946 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8245587e-b385-48bf-a684-2c72fedfb5d6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.375049 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98xcs\" (UniqueName: \"kubernetes.io/projected/8245587e-b385-48bf-a684-2c72fedfb5d6-kube-api-access-98xcs\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.401444 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9a3aa21a-aad0-4123-a4e5-faba7e8a5784\") pod \"ovsdbserver-nb-0\" (UID: \"8245587e-b385-48bf-a684-2c72fedfb5d6\") " pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.512791 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.923921 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s4nwf"]
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.925495 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.928409 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.929339 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-65mlx"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.931840 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.940566 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf"]
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.951880 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-kfl9p"]
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.953720 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:09 crc kubenswrapper[4689]: I0123 11:10:09.961978 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kfl9p"]
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061116 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061194 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061240 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f125458c-8822-4c87-a559-adf4f9387166-scripts\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061270 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-log\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061301 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-etc-ovs\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061361 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8kf5\" (UniqueName: \"kubernetes.io/projected/f125458c-8822-4c87-a559-adf4f9387166-kube-api-access-m8kf5\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061390 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmn9q\" (UniqueName: \"kubernetes.io/projected/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-kube-api-access-tmn9q\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061416 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-run\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061449 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-scripts\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061529 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-combined-ca-bundle\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061555 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-log-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061581 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-lib\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.061607 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-ovn-controller-tls-certs\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.163787 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8kf5\" (UniqueName: \"kubernetes.io/projected/f125458c-8822-4c87-a559-adf4f9387166-kube-api-access-m8kf5\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.163849 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmn9q\" (UniqueName: \"kubernetes.io/projected/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-kube-api-access-tmn9q\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.163880 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-run\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.163921 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-scripts\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164177 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-combined-ca-bundle\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164211 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-log-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164242 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-lib\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164267 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-ovn-controller-tls-certs\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164330 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164367 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164418 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f125458c-8822-4c87-a559-adf4f9387166-scripts\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164450 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-log\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164505 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-etc-ovs\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164941 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-etc-ovs\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.164938 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-run\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.165087 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-lib\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.165098 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.165203 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-log-ovn\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.165287 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f125458c-8822-4c87-a559-adf4f9387166-var-log\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.165344 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-var-run\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.166932 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-scripts\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.168953 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f125458c-8822-4c87-a559-adf4f9387166-scripts\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.169231 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-combined-ca-bundle\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.171529 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-ovn-controller-tls-certs\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.182903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmn9q\" (UniqueName: \"kubernetes.io/projected/6c69c7bf-0e75-4bed-a212-2b7746d5ef88-kube-api-access-tmn9q\") pod \"ovn-controller-s4nwf\" (UID: \"6c69c7bf-0e75-4bed-a212-2b7746d5ef88\") " pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.183717 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8kf5\" (UniqueName: \"kubernetes.io/projected/f125458c-8822-4c87-a559-adf4f9387166-kube-api-access-m8kf5\") pod \"ovn-controller-ovs-kfl9p\" (UID: \"f125458c-8822-4c87-a559-adf4f9387166\") " pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.251520 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:10 crc kubenswrapper[4689]: I0123 11:10:10.273652 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:10:10 crc kubenswrapper[4689]: W0123 11:10:10.813700 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84c84a76_3fda_4d1e_bc46_e806b5462845.slice/crio-788e4ffeefedad268af4095552f7c69825df0be1295bbe86d4078ec045805263 WatchSource:0}: Error finding container 788e4ffeefedad268af4095552f7c69825df0be1295bbe86d4078ec045805263: Status 404 returned error can't find the container with id 788e4ffeefedad268af4095552f7c69825df0be1295bbe86d4078ec045805263
Jan 23 11:10:11 crc kubenswrapper[4689]: I0123 11:10:11.780614 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerStarted","Data":"6a7c95733a3dd5f7ff3648c5d895f2f023ae9fc9351bf9b304086175357e3541"}
Jan 23 11:10:11 crc kubenswrapper[4689]: I0123 11:10:11.784191 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerStarted","Data":"788e4ffeefedad268af4095552f7c69825df0be1295bbe86d4078ec045805263"}
Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.132272 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.138115 4689 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.140115 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.141582 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-f85pv" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.141764 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.141972 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.150690 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233065 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4c642241-8901-4942-aa1b-b95db101ebb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4c642241-8901-4942-aa1b-b95db101ebb8\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233202 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233247 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233269 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ttxx\" (UniqueName: \"kubernetes.io/projected/5c276d1f-838f-4113-b343-18c150dfa59b-kube-api-access-5ttxx\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233440 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-config\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.233808 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.234034 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-metrics-certs-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.234103 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336618 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336679 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4c642241-8901-4942-aa1b-b95db101ebb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4c642241-8901-4942-aa1b-b95db101ebb8\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336732 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336761 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336778 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ttxx\" (UniqueName: \"kubernetes.io/projected/5c276d1f-838f-4113-b343-18c150dfa59b-kube-api-access-5ttxx\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336817 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-config\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336886 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.336949 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.337832 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.338220 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-config\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.338267 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c276d1f-838f-4113-b343-18c150dfa59b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.340360 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.340397 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4c642241-8901-4942-aa1b-b95db101ebb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4c642241-8901-4942-aa1b-b95db101ebb8\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6cb7ec7c78f6798e6a5af4fdae764449f7b06f9cc43aeb9b5c7db82cf5fd2328/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.355970 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.358389 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ttxx\" (UniqueName: \"kubernetes.io/projected/5c276d1f-838f-4113-b343-18c150dfa59b-kube-api-access-5ttxx\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.358540 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.363944 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c276d1f-838f-4113-b343-18c150dfa59b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.385060 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4c642241-8901-4942-aa1b-b95db101ebb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4c642241-8901-4942-aa1b-b95db101ebb8\") pod \"ovsdbserver-sb-0\" (UID: 
\"5c276d1f-838f-4113-b343-18c150dfa59b\") " pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:13 crc kubenswrapper[4689]: I0123 11:10:13.474222 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 23 11:10:15 crc kubenswrapper[4689]: I0123 11:10:15.224128 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 11:10:20 crc kubenswrapper[4689]: W0123 11:10:20.347113 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcab355b0_25b6_4ad4_83ad_718ae756ae29.slice/crio-e21127bfd29f980599950e3d064427011bb0832857794db2e35d11ff1fffd96f WatchSource:0}: Error finding container e21127bfd29f980599950e3d064427011bb0832857794db2e35d11ff1fffd96f: Status 404 returned error can't find the container with id e21127bfd29f980599950e3d064427011bb0832857794db2e35d11ff1fffd96f Jan 23 11:10:20 crc kubenswrapper[4689]: I0123 11:10:20.864466 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerStarted","Data":"e21127bfd29f980599950e3d064427011bb0832857794db2e35d11ff1fffd96f"} Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.731444 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.731605 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jsskj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-hfcbh_openstack(735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.732898 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.742787 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.742935 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bpgvh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-bsdh6_openstack(1d87a360-d751-4cb5-9b20-b547d590b258): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.746304 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" podUID="1d87a360-d751-4cb5-9b20-b547d590b258" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.779103 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.779410 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ctcwv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-52rg9_openstack(cea97349-f6bc-4cf6-af0f-5c1781edea31): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.780728 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" podUID="cea97349-f6bc-4cf6-af0f-5c1781edea31" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.785827 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.786103 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ssmz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-mrdnk_openstack(c88bc3f8-37ac-4592-85d3-a5e048f48e1d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.788329 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" podUID="c88bc3f8-37ac-4592-85d3-a5e048f48e1d" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.886416 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" podUID="c88bc3f8-37ac-4592-85d3-a5e048f48e1d" Jan 23 11:10:21 crc kubenswrapper[4689]: E0123 11:10:21.886446 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.648245 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.765355 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpgvh\" (UniqueName: \"kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh\") pod \"1d87a360-d751-4cb5-9b20-b547d590b258\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.765551 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config\") pod \"1d87a360-d751-4cb5-9b20-b547d590b258\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.765692 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc\") pod \"1d87a360-d751-4cb5-9b20-b547d590b258\" (UID: \"1d87a360-d751-4cb5-9b20-b547d590b258\") " Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.766043 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1d87a360-d751-4cb5-9b20-b547d590b258" (UID: "1d87a360-d751-4cb5-9b20-b547d590b258"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.766197 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config" (OuterVolumeSpecName: "config") pod "1d87a360-d751-4cb5-9b20-b547d590b258" (UID: "1d87a360-d751-4cb5-9b20-b547d590b258"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.768632 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.768663 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d87a360-d751-4cb5-9b20-b547d590b258-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.774883 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh" (OuterVolumeSpecName: "kube-api-access-bpgvh") pod "1d87a360-d751-4cb5-9b20-b547d590b258" (UID: "1d87a360-d751-4cb5-9b20-b547d590b258"). InnerVolumeSpecName "kube-api-access-bpgvh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.792933 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.802916 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.870801 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpgvh\" (UniqueName: \"kubernetes.io/projected/1d87a360-d751-4cb5-9b20-b547d590b258-kube-api-access-bpgvh\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:22 crc kubenswrapper[4689]: W0123 11:10:22.884672 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9926a3b2_8d65_4876_b56b_488948df1352.slice/crio-d731be38b32369a1ac7458576d47bca3e985f29718d90c53a0595843d4e4c230 WatchSource:0}: Error finding container d731be38b32369a1ac7458576d47bca3e985f29718d90c53a0595843d4e4c230: Status 404 returned error can't find the container with id d731be38b32369a1ac7458576d47bca3e985f29718d90c53a0595843d4e4c230 Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.898044 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" event={"ID":"1d87a360-d751-4cb5-9b20-b547d590b258","Type":"ContainerDied","Data":"e3301eb381818d83d3305e1fd1fa65a9e03ef6cfce203e62988ce5e87558fc34"} Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.898261 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bsdh6" Jan 23 11:10:22 crc kubenswrapper[4689]: I0123 11:10:22.990928 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.004283 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bsdh6"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.024334 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kfl9p"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.145590 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.167217 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.241019 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.453194 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.466572 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-fn992"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.521573 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5859dc97b8-47f8f"] Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.657452 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d87a360-d751-4cb5-9b20-b547d590b258" path="/var/lib/kubelet/pods/1d87a360-d751-4cb5-9b20-b547d590b258/volumes" Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.916523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerStarted","Data":"c7764eee40e8cd9678326b78683d0231b5d9b231b9d9c3a2713e7761a4561571"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.919295 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerStarted","Data":"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.920813 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" event={"ID":"cea97349-f6bc-4cf6-af0f-5c1781edea31","Type":"ContainerDied","Data":"9d3455c528c0b27d56246924d981ac30a4e847f401026b837b5573a92ddfbcd0"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.920860 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d3455c528c0b27d56246924d981ac30a4e847f401026b837b5573a92ddfbcd0" Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.924137 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5c276d1f-838f-4113-b343-18c150dfa59b","Type":"ContainerStarted","Data":"f90684b784855bf5177a311f69be9d34a464378242f85cda9913ef02745c838d"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.926216 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf" event={"ID":"6c69c7bf-0e75-4bed-a212-2b7746d5ef88","Type":"ContainerStarted","Data":"125de9967a7cbe32beb16bf1c825c2378b45b59747cd535c10cf152b51c4bcb5"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.927622 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3b85b39c-5625-416e-9ddd-55845b645716","Type":"ContainerStarted","Data":"ce2cbdeb0b2e4dc0daf6bf5bd1323fb829829ae72351b5dd6639a866fca28505"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.933263 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kfl9p" event={"ID":"f125458c-8822-4c87-a559-adf4f9387166","Type":"ContainerStarted","Data":"193cc234c8f50840084a89c069030ea48f4dc0cae1bb4f3305f5ac553207abd5"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.934463 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5859dc97b8-47f8f" event={"ID":"5d2085e7-92df-4502-97e9-66dfbfae189a","Type":"ContainerStarted","Data":"1ad9b3d4a38362e648809fc1d1376d3251e73824319c64816344e70bf1cacddc"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.936313 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerStarted","Data":"d731be38b32369a1ac7458576d47bca3e985f29718d90c53a0595843d4e4c230"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.937883 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992" event={"ID":"0c864284-1e65-4712-a75f-bad9506f55d6","Type":"ContainerStarted","Data":"f131e887b1b62b91db020d2bf82894ebc5fffc6c969055ecb3c149ef9777f0b2"} Jan 23 11:10:23 crc kubenswrapper[4689]: I0123 11:10:23.939976 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"30b34da3-4b55-46a6-88c3-4bdfcbde2f66","Type":"ContainerStarted","Data":"0716e26846fef6cd3089a5560c606797afbb6bccc91f2cb7cb5583eda8d20ead"} Jan 23 11:10:23 crc 
kubenswrapper[4689]: I0123 11:10:23.974451 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.018276 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config\") pod \"cea97349-f6bc-4cf6-af0f-5c1781edea31\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.018517 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctcwv\" (UniqueName: \"kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv\") pod \"cea97349-f6bc-4cf6-af0f-5c1781edea31\" (UID: \"cea97349-f6bc-4cf6-af0f-5c1781edea31\") " Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.018810 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config" (OuterVolumeSpecName: "config") pod "cea97349-f6bc-4cf6-af0f-5c1781edea31" (UID: "cea97349-f6bc-4cf6-af0f-5c1781edea31"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.019344 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cea97349-f6bc-4cf6-af0f-5c1781edea31-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.036923 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv" (OuterVolumeSpecName: "kube-api-access-ctcwv") pod "cea97349-f6bc-4cf6-af0f-5c1781edea31" (UID: "cea97349-f6bc-4cf6-af0f-5c1781edea31"). InnerVolumeSpecName "kube-api-access-ctcwv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.057197 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 11:10:24 crc kubenswrapper[4689]: W0123 11:10:24.060173 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8245587e_b385_48bf_a684_2c72fedfb5d6.slice/crio-3947cfc4bb1027acc682c4685653609dac3030492cf9efe379ff3d4ce69f997b WatchSource:0}: Error finding container 3947cfc4bb1027acc682c4685653609dac3030492cf9efe379ff3d4ce69f997b: Status 404 returned error can't find the container with id 3947cfc4bb1027acc682c4685653609dac3030492cf9efe379ff3d4ce69f997b Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.122460 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctcwv\" (UniqueName: \"kubernetes.io/projected/cea97349-f6bc-4cf6-af0f-5c1781edea31-kube-api-access-ctcwv\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.954011 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5859dc97b8-47f8f" event={"ID":"5d2085e7-92df-4502-97e9-66dfbfae189a","Type":"ContainerStarted","Data":"466f16cbff91ecef476fbf44e262293aa570836a2c90bc368fc4dc6e34942c0b"} Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.955818 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerStarted","Data":"d16acc7bda60fa122cc4c7a5a7c8aa08193fdfa0635393cc61f64782683992c8"} Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.958642 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerStarted","Data":"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770"} Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.960431 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8245587e-b385-48bf-a684-2c72fedfb5d6","Type":"ContainerStarted","Data":"3947cfc4bb1027acc682c4685653609dac3030492cf9efe379ff3d4ce69f997b"} Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.962162 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerStarted","Data":"8c5b9da8c95f2018a8bac1dadf51c7270e4e896fb8da259fd6208b603520d69d"} Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.962230 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-52rg9" Jan 23 11:10:24 crc kubenswrapper[4689]: I0123 11:10:24.975253 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5859dc97b8-47f8f" podStartSLOduration=18.975237012 podStartE2EDuration="18.975237012s" podCreationTimestamp="2026-01-23 11:10:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:10:24.970881114 +0000 UTC m=+1289.595560973" watchObservedRunningTime="2026-01-23 11:10:24.975237012 +0000 UTC m=+1289.599916871" Jan 23 11:10:25 crc kubenswrapper[4689]: I0123 11:10:25.129534 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:10:25 crc kubenswrapper[4689]: I0123 11:10:25.136005 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-52rg9"] Jan 23 11:10:25 crc kubenswrapper[4689]: I0123 11:10:25.652234 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cea97349-f6bc-4cf6-af0f-5c1781edea31" path="/var/lib/kubelet/pods/cea97349-f6bc-4cf6-af0f-5c1781edea31/volumes" Jan 23 11:10:27 crc kubenswrapper[4689]: I0123 11:10:27.458465 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 11:10:27 crc kubenswrapper[4689]: I0123 11:10:27.458724 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 11:10:27 crc kubenswrapper[4689]: I0123 11:10:27.464715 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 11:10:28 crc kubenswrapper[4689]: I0123 11:10:28.003268 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 11:10:28 crc kubenswrapper[4689]: I0123 11:10:28.068431 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.110545 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3b85b39c-5625-416e-9ddd-55845b645716","Type":"ContainerStarted","Data":"515409f68e520f4f4be2bd9177c962e43321e689d063af22a4244683f0f126cf"} Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.111615 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.114411 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"30b34da3-4b55-46a6-88c3-4bdfcbde2f66","Type":"ContainerStarted","Data":"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2"} Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.115617 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.122233 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5c276d1f-838f-4113-b343-18c150dfa59b","Type":"ContainerStarted","Data":"bfda6b25623a8b29e5c22987ac8a89118d9b62f304a00feba38a258b3e7f94dc"} Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.144644 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" 
podStartSLOduration=21.345198818 podStartE2EDuration="30.144626188s" podCreationTimestamp="2026-01-23 11:10:03 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.47259328 +0000 UTC m=+1288.097273139" lastFinishedPulling="2026-01-23 11:10:32.27202065 +0000 UTC m=+1296.896700509" observedRunningTime="2026-01-23 11:10:33.125669797 +0000 UTC m=+1297.750349666" watchObservedRunningTime="2026-01-23 11:10:33.144626188 +0000 UTC m=+1297.769306047" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.150538 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=19.124771054 podStartE2EDuration="28.150524014s" podCreationTimestamp="2026-01-23 11:10:05 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.452168902 +0000 UTC m=+1288.076848761" lastFinishedPulling="2026-01-23 11:10:32.477921852 +0000 UTC m=+1297.102601721" observedRunningTime="2026-01-23 11:10:33.142225428 +0000 UTC m=+1297.766905297" watchObservedRunningTime="2026-01-23 11:10:33.150524014 +0000 UTC m=+1297.775203863" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.274852 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-plmwq"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.276896 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.279429 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.297867 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-plmwq"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.325938 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.326185 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.326291 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.327066 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.327201 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041" gracePeriod=600 Jan 23 11:10:33 crc 
kubenswrapper[4689]: I0123 11:10:33.351096 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c974g\" (UniqueName: \"kubernetes.io/projected/833907d1-336e-417d-b362-bffe1f3521d3-kube-api-access-c974g\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.351209 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovn-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.351232 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.351287 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-combined-ca-bundle\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.351312 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/833907d1-336e-417d-b362-bffe1f3521d3-config\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.351335 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovs-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.433844 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.457325 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.459966 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460600 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-combined-ca-bundle\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460654 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/833907d1-336e-417d-b362-bffe1f3521d3-config\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460683 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovs-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460753 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c974g\" (UniqueName: \"kubernetes.io/projected/833907d1-336e-417d-b362-bffe1f3521d3-kube-api-access-c974g\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460829 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovn-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.460854 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.461785 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovs-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.461990 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.462161 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/833907d1-336e-417d-b362-bffe1f3521d3-ovn-rundir\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.462206 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/833907d1-336e-417d-b362-bffe1f3521d3-config\") pod 
\"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.471455 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-combined-ca-bundle\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.471689 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/833907d1-336e-417d-b362-bffe1f3521d3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.499804 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.518015 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c974g\" (UniqueName: \"kubernetes.io/projected/833907d1-336e-417d-b362-bffe1f3521d3-kube-api-access-c974g\") pod \"ovn-controller-metrics-plmwq\" (UID: \"833907d1-336e-417d-b362-bffe1f3521d3\") " pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.564528 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpd8f\" (UniqueName: \"kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.564837 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.565103 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.565399 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.620596 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-plmwq" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.666866 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.666999 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpd8f\" (UniqueName: \"kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.667032 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.667160 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.668251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.668939 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.669707 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.703448 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpd8f\" (UniqueName: \"kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f\") pod \"dnsmasq-dns-5bf47b49b7-cckbn\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.704992 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.782311 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.783971 4689 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.800720 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"] Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.803466 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.883778 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.885723 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.885866 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.885959 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.886162 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf7wt\" (UniqueName: \"kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.924587 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.992261 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.992313 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.992344 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.992404 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf7wt\" (UniqueName: \"kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.992462 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.993267 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.993323 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.994190 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:33 crc kubenswrapper[4689]: I0123 11:10:33.994202 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 
11:10:34.139517 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerStarted","Data":"3a5a030927c7fe7bf3bf1502577e9aa21d3d89badd9e6419fdca14bb956b2bb3"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.141993 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerStarted","Data":"a3e8c566c58c5248ed2ec354fb36410fde0e3eb5ae246f762fd595401968f87a"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.156786 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf" event={"ID":"6c69c7bf-0e75-4bed-a212-2b7746d5ef88","Type":"ContainerStarted","Data":"7726eb83afd0985c83614fe30548ec572a32a51f50fea0041ac66f04d36d62fa"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.157642 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-s4nwf"
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.169685 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992" event={"ID":"0c864284-1e65-4712-a75f-bad9506f55d6","Type":"ContainerStarted","Data":"98eec4778c576e0e0b2a2d207a78fd262d4f463db808dd5c672c15118f264388"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.191629 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041" exitCode=0
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.191716 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.191777 4689 scope.go:117] "RemoveContainer" containerID="d3cefa8656f04de15341e4eb597dc2badbed1ac4c35f5187204a360ab5c0ac81"
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.198950 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8245587e-b385-48bf-a684-2c72fedfb5d6","Type":"ContainerStarted","Data":"73cebc04e6639a76ffb31a8e815d66981b57d6fd7f2a0ed24eaf1f20b863e094"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.213860 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kfl9p" event={"ID":"f125458c-8822-4c87-a559-adf4f9387166","Type":"ContainerStarted","Data":"242d49812c4d690bd76182b3715d9e07e8b759c3686f9a3bbf7c32bbd4c5da12"}
Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.223947 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-fn992" podStartSLOduration=20.034046902 podStartE2EDuration="28.223929538s" podCreationTimestamp="2026-01-23 11:10:06 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.478435915 +0000 UTC m=+1288.103115774" lastFinishedPulling="2026-01-23 11:10:31.668318551 +0000 UTC m=+1296.292998410" observedRunningTime="2026-01-23 11:10:34.215471938 +0000 UTC m=+1298.840151797" watchObservedRunningTime="2026-01-23 11:10:34.223929538 +0000 UTC m=+1298.848609397"
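[annotation] The pod_startup_latency_tracker entry above reports two durations: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). Redoing the arithmetic for observability-ui-dashboards-66cbf594b5-fn992 with the timestamps from the log; the layout string is Go's default time.Time formatting, and the helper is illustrative rather than kubelet code:

    package main

    import (
    	"fmt"
    	"time"
    )

    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func mustParse(s string) time.Time {
    	t, err := time.Parse(layout, s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2026-01-23 11:10:06 +0000 UTC")
    	firstPull := mustParse("2026-01-23 11:10:23.478435915 +0000 UTC")
    	lastPull := mustParse("2026-01-23 11:10:31.668318551 +0000 UTC")
    	running := mustParse("2026-01-23 11:10:34.223929538 +0000 UTC")

    	e2e := running.Sub(created)     // podStartE2EDuration: 28.223929538s
    	pull := lastPull.Sub(firstPull) // image-pull window:    8.189882636s
    	slo := e2e - pull               // podStartSLOduration: 20.034046902s
    	fmt.Println(e2e, pull, slo)
    }

The dnsmasq entries further down, whose firstStartedPulling/lastFinishedPulling sit at the zero time 0001-01-01, show the same tracker when no image pull happened: there the SLO and E2E durations coincide.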
duration" pod="openstack/ovn-controller-s4nwf" podStartSLOduration=17.053878626 podStartE2EDuration="25.268364634s" podCreationTimestamp="2026-01-23 11:10:09 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.453866274 +0000 UTC m=+1288.078546133" lastFinishedPulling="2026-01-23 11:10:31.668352282 +0000 UTC m=+1296.293032141" observedRunningTime="2026-01-23 11:10:34.265600626 +0000 UTC m=+1298.890280505" watchObservedRunningTime="2026-01-23 11:10:34.268364634 +0000 UTC m=+1298.893044483" Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.372940 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf7wt\" (UniqueName: \"kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt\") pod \"dnsmasq-dns-8554648995-l6zc2\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") " pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.441749 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.720964 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-plmwq"] Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.942790 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:34 crc kubenswrapper[4689]: I0123 11:10:34.963520 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.036659 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config\") pod \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.036761 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc\") pod \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.036861 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssmz4\" (UniqueName: \"kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4\") pod \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\" (UID: \"c88bc3f8-37ac-4592-85d3-a5e048f48e1d\") " Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.037807 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c88bc3f8-37ac-4592-85d3-a5e048f48e1d" (UID: "c88bc3f8-37ac-4592-85d3-a5e048f48e1d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.038282 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config" (OuterVolumeSpecName: "config") pod "c88bc3f8-37ac-4592-85d3-a5e048f48e1d" (UID: "c88bc3f8-37ac-4592-85d3-a5e048f48e1d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.038983 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.039008 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.182305 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4" (OuterVolumeSpecName: "kube-api-access-ssmz4") pod "c88bc3f8-37ac-4592-85d3-a5e048f48e1d" (UID: "c88bc3f8-37ac-4592-85d3-a5e048f48e1d"). InnerVolumeSpecName "kube-api-access-ssmz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.242477 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssmz4\" (UniqueName: \"kubernetes.io/projected/c88bc3f8-37ac-4592-85d3-a5e048f48e1d-kube-api-access-ssmz4\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.252378 4689 generic.go:334] "Generic (PLEG): container finished" podID="f125458c-8822-4c87-a559-adf4f9387166" containerID="242d49812c4d690bd76182b3715d9e07e8b759c3686f9a3bbf7c32bbd4c5da12" exitCode=0 Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.252775 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kfl9p" event={"ID":"f125458c-8822-4c87-a559-adf4f9387166","Type":"ContainerDied","Data":"242d49812c4d690bd76182b3715d9e07e8b759c3686f9a3bbf7c32bbd4c5da12"} Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.256311 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk" event={"ID":"c88bc3f8-37ac-4592-85d3-a5e048f48e1d","Type":"ContainerDied","Data":"4d74c3720661ae1410fe2c21af133a80ebf8d59adfb626b70d873d817535e61c"} Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.256399 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-mrdnk"
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.257605 4689 generic.go:334] "Generic (PLEG): container finished" podID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" containerID="9e77fd2de893e597b6470d345d6fc91576dad1e9f9987e6a37d7f7cc4e89b817" exitCode=0
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.257655 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" event={"ID":"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f","Type":"ContainerDied","Data":"9e77fd2de893e597b6470d345d6fc91576dad1e9f9987e6a37d7f7cc4e89b817"}
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.259755 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-plmwq" event={"ID":"833907d1-336e-417d-b362-bffe1f3521d3","Type":"ContainerStarted","Data":"355c61f2c635edc4ac5024744ffbc8b093dcb01b3f71f0df06a699686dbf52d3"}
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.261758 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" event={"ID":"7b364f11-6505-4150-976d-5424c3f8b686","Type":"ContainerStarted","Data":"59502ff006236e5a2640c197050136b13c7c1da9456f1fd7356170323c1d8956"}
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.355048 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"]
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.372116 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"]
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.386528 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-mrdnk"]
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.655671 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c88bc3f8-37ac-4592-85d3-a5e048f48e1d" path="/var/lib/kubelet/pods/c88bc3f8-37ac-4592-85d3-a5e048f48e1d/volumes"
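[annotation] For dnsmasq-dns-57d769cc4f-mrdnk the log now shows kubelet's deletion path end to end: SyncLoop DELETE from the API, UnmountVolume/TearDown for each of its volumes, "Volume detached" once each plugin reports done, SyncLoop REMOVE when the API object disappears, and finally kubelet_volumes.go removing /var/lib/kubelet/pods/<uid>/volumes. A sketch of the final orphan check, using the path layout from the log; the scan logic is illustrative, not kubelet's actual kubelet_volumes.go code:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	root := "/var/lib/kubelet/pods"
    	pods, err := os.ReadDir(root)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	for _, p := range pods {
    		volumes := filepath.Join(root, p.Name(), "volumes")
    		entries, err := os.ReadDir(volumes)
    		if err != nil {
    			continue // no volumes dir for this pod
    		}
    		if len(entries) == 0 {
    			// analogous to "Cleaned up orphaned pod volumes dir" above:
    			// only an emptied volumes dir is safe to remove.
    			fmt.Println("orphaned volumes dir, removable:", volumes)
    		}
    	}
    }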
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.849669 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh"
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.965604 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc\") pod \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") "
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.965957 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config\") pod \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") "
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.966112 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsskj\" (UniqueName: \"kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj\") pod \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\" (UID: \"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f\") "
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.972371 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj" (OuterVolumeSpecName: "kube-api-access-jsskj") pod "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" (UID: "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f"). InnerVolumeSpecName "kube-api-access-jsskj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.990376 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" (UID: "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:10:35 crc kubenswrapper[4689]: I0123 11:10:35.992378 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config" (OuterVolumeSpecName: "config") pod "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" (UID: "735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.068906 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsskj\" (UniqueName: \"kubernetes.io/projected/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-kube-api-access-jsskj\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.068943 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.068957 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.289450 4689 generic.go:334] "Generic (PLEG): container finished" podID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerID="17ac550e891ed1780556e8d43714f65a66e3b79c9cafa1f2e27eaef940d51793" exitCode=0 Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.289511 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-l6zc2" event={"ID":"5ded0d37-bdeb-425a-bed5-c7130a590643","Type":"ContainerDied","Data":"17ac550e891ed1780556e8d43714f65a66e3b79c9cafa1f2e27eaef940d51793"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.289803 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-l6zc2" event={"ID":"5ded0d37-bdeb-425a-bed5-c7130a590643","Type":"ContainerStarted","Data":"05dfb70a3bf3e4bd6b549ca9e75d10e3c8c284f824a8769b7ff7845dc71d987d"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.293034 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" event={"ID":"735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f","Type":"ContainerDied","Data":"c225f07bb7713df8586efb5cef079736e3fa30485dfd9f71c5d39141023748a7"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.293065 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hfcbh" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.293067 4689 scope.go:117] "RemoveContainer" containerID="9e77fd2de893e597b6470d345d6fc91576dad1e9f9987e6a37d7f7cc4e89b817" Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.294811 4689 generic.go:334] "Generic (PLEG): container finished" podID="7b364f11-6505-4150-976d-5424c3f8b686" containerID="f5324cb131154b647b26f119828d0b67392e37cfa51c198b1eb3506a8880198a" exitCode=0 Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.294872 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" event={"ID":"7b364f11-6505-4150-976d-5424c3f8b686","Type":"ContainerDied","Data":"f5324cb131154b647b26f119828d0b67392e37cfa51c198b1eb3506a8880198a"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.297408 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.300787 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kfl9p" event={"ID":"f125458c-8822-4c87-a559-adf4f9387166","Type":"ContainerStarted","Data":"7ba540731840d5f986beaec70c96a6b1167694edd4a8e86a7585cdc96aa95fe4"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.303166 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerStarted","Data":"acf99f6d01714c870d76ca8b6d3727de27773a6b7a39091d8fa7610262a22db9"} Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.430692 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:10:36 crc kubenswrapper[4689]: I0123 11:10:36.441072 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hfcbh"] Jan 23 11:10:37 crc kubenswrapper[4689]: I0123 11:10:37.656428 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" path="/var/lib/kubelet/pods/735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f/volumes" Jan 23 11:10:39 crc kubenswrapper[4689]: I0123 11:10:39.072392 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 23 11:10:40 crc kubenswrapper[4689]: I0123 11:10:40.346059 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kfl9p" event={"ID":"f125458c-8822-4c87-a559-adf4f9387166","Type":"ContainerStarted","Data":"c34f61273f62b7d50098d49f6774e47b58742047027ec71d41d9d9f2358b1cc5"} Jan 23 11:10:41 crc kubenswrapper[4689]: I0123 11:10:41.357760 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" event={"ID":"7b364f11-6505-4150-976d-5424c3f8b686","Type":"ContainerStarted","Data":"676dc0fb6a00bde6fe2b2a221e1d773239b5d1a796c29a38a34f2b035fab356b"} Jan 23 11:10:41 crc kubenswrapper[4689]: I0123 11:10:41.360067 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-l6zc2" event={"ID":"5ded0d37-bdeb-425a-bed5-c7130a590643","Type":"ContainerStarted","Data":"9e0be4ceb870679d2d7a320424ff1f891fced4fefce8395b379944cea9ebb218"} Jan 23 11:10:41 crc kubenswrapper[4689]: I0123 11:10:41.360186 4689 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kfl9p" Jan 23 11:10:41 crc kubenswrapper[4689]: I0123 11:10:41.389599 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-kfl9p" podStartSLOduration=24.141998714 podStartE2EDuration="32.389575774s" podCreationTimestamp="2026-01-23 11:10:09 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.422762601 +0000 UTC m=+1288.047442460" lastFinishedPulling="2026-01-23 11:10:31.670339661 +0000 UTC m=+1296.295019520" observedRunningTime="2026-01-23 11:10:41.383507632 +0000 UTC m=+1306.008187491" watchObservedRunningTime="2026-01-23 11:10:41.389575774 +0000 UTC m=+1306.014255633" Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.378481 4689 generic.go:334] "Generic (PLEG): container finished" podID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerID="acf99f6d01714c870d76ca8b6d3727de27773a6b7a39091d8fa7610262a22db9" exitCode=0 Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.378582 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerDied","Data":"acf99f6d01714c870d76ca8b6d3727de27773a6b7a39091d8fa7610262a22db9"} Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.379282 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.379313 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kfl9p" Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.404328 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-l6zc2" podStartSLOduration=9.404303748 podStartE2EDuration="9.404303748s" podCreationTimestamp="2026-01-23 11:10:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:10:42.397695523 +0000 UTC m=+1307.022375382" watchObservedRunningTime="2026-01-23 11:10:42.404303748 +0000 UTC m=+1307.028983607" Jan 23 11:10:42 crc kubenswrapper[4689]: I0123 11:10:42.417008 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" podStartSLOduration=9.416984613 podStartE2EDuration="9.416984613s" podCreationTimestamp="2026-01-23 11:10:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:10:42.415238659 +0000 UTC m=+1307.039918548" watchObservedRunningTime="2026-01-23 11:10:42.416984613 +0000 UTC m=+1307.041664482" Jan 23 11:10:43 crc kubenswrapper[4689]: I0123 11:10:43.925693 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.032653 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.033215 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" containerID="cri-o://676dc0fb6a00bde6fe2b2a221e1d773239b5d1a796c29a38a34f2b035fab356b" gracePeriod=10 Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.035328 4689 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.068793 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:10:46 crc kubenswrapper[4689]: E0123 11:10:46.069269 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" containerName="init" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.069291 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" containerName="init" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.069497 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="735b548b-8f9c-4bbe-9dc0-9bed36c7cb7f" containerName="init" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.070585 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.107951 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.177010 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.184811 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.184884 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.185089 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.185219 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.185351 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxx5j\" (UniqueName: \"kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.286898 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.286994 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.287794 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.287798 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.287893 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.287938 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.288017 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxx5j\" (UniqueName: \"kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.288697 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.288757 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.310093 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxx5j\" (UniqueName: \"kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j\") pod \"dnsmasq-dns-b8fbc5445-whrz5\" (UID: 
\"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:46 crc kubenswrapper[4689]: I0123 11:10:46.397010 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.140631 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.156952 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.159508 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-kh99f" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.159726 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.160108 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.160707 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.166199 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.309496 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-lock\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.310889 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c4m9\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-kube-api-access-2c4m9\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.312854 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-faece91c-2540-4f31-971b-fdff0f89a359\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-faece91c-2540-4f31-971b-fdff0f89a359\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.312899 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-cache\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.312934 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.313031 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/502e87fb-9e46-41c3-929e-c007018641db-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.380480 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-sb9zz"] Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.381856 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.383985 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.384380 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.384560 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.390573 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sb9zz"] Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.414800 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c4m9\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-kube-api-access-2c4m9\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.414952 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-faece91c-2540-4f31-971b-fdff0f89a359\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-faece91c-2540-4f31-971b-fdff0f89a359\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.414979 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-cache\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.415001 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.415049 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502e87fb-9e46-41c3-929e-c007018641db-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.415105 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-lock\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.421212 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: 
\"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-cache\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.421238 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/502e87fb-9e46-41c3-929e-c007018641db-lock\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.421391 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.421426 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.421528 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:10:47.921456183 +0000 UTC m=+1312.546136042 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.424162 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.424203 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-faece91c-2540-4f31-971b-fdff0f89a359\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-faece91c-2540-4f31-971b-fdff0f89a359\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/42eee692616cc5e3706296907f69d5c3778dbefe67b2685c91fc082b16e8d87e/globalmount\"" pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.433003 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502e87fb-9e46-41c3-929e-c007018641db-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.450996 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c4m9\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-kube-api-access-2c4m9\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.454863 4689 generic.go:334] "Generic (PLEG): container finished" podID="7b364f11-6505-4150-976d-5424c3f8b686" containerID="676dc0fb6a00bde6fe2b2a221e1d773239b5d1a796c29a38a34f2b035fab356b" exitCode=0 Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.454923 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" 
event={"ID":"7b364f11-6505-4150-976d-5424c3f8b686","Type":"ContainerDied","Data":"676dc0fb6a00bde6fe2b2a221e1d773239b5d1a796c29a38a34f2b035fab356b"} Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.483318 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-faece91c-2540-4f31-971b-fdff0f89a359\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-faece91c-2540-4f31-971b-fdff0f89a359\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522283 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522359 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522456 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522704 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522754 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9w2m\" (UniqueName: \"kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522790 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.522823 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625210 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9w2m\" (UniqueName: 
\"kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625277 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625347 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625375 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.625478 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.626289 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.626484 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.626992 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.627622 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices\") pod \"swift-ring-rebalance-sb9zz\" (UID: 
\"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.630115 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.634294 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.644585 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9w2m\" (UniqueName: \"kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.644600 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf\") pod \"swift-ring-rebalance-sb9zz\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.707983 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:10:47 crc kubenswrapper[4689]: I0123 11:10:47.933511 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.933735 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.933751 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:10:47 crc kubenswrapper[4689]: E0123 11:10:47.933792 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:10:48.933778569 +0000 UTC m=+1313.558458428 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:10:48 crc kubenswrapper[4689]: I0123 11:10:48.956283 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:48 crc kubenswrapper[4689]: E0123 11:10:48.956563 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:10:48 crc kubenswrapper[4689]: E0123 11:10:48.956604 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:10:48 crc kubenswrapper[4689]: E0123 11:10:48.956687 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:10:50.956658565 +0000 UTC m=+1315.581338444 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:10:49 crc kubenswrapper[4689]: I0123 11:10:49.443286 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-l6zc2" Jan 23 11:10:50 crc kubenswrapper[4689]: I0123 11:10:50.999284 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:51 crc kubenswrapper[4689]: E0123 11:10:50.999507 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:10:51 crc kubenswrapper[4689]: E0123 11:10:50.999725 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:10:51 crc kubenswrapper[4689]: E0123 11:10:50.999778 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:10:54.999763104 +0000 UTC m=+1319.624442963 (durationBeforeRetry 4s). 
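
Annotation: the swift-storage-0 pod cannot set up its projected "etc-swift" volume because the swift-ring-files ConfigMap does not exist yet (it is produced by the swift-ring-rebalance job that starts further down in this log), and the durationBeforeRetry values double on each failure: 500ms, 1s, 2s, then 4s, 8s, and 16s in the later records. Below is a minimal, illustrative Go sketch of that doubling-with-cap retry policy; the function name is hypothetical and the initial delay and upper bound are assumptions inferred from these log lines, not the kubelet's actual nestedpendingoperations code.

    // Illustrative sketch of the doubling retry delay visible in the
    // durationBeforeRetry fields above (hypothetical names; initial
    // delay and cap are assumptions inferred from this log).
    package main

    import (
        "fmt"
        "time"
    )

    func nextDelay(prev time.Duration) time.Duration {
        const (
            initial  = 500 * time.Millisecond          // first retry seen above
            maxDelay = 2*time.Minute + 2*time.Second   // assumed upper bound
        )
        if prev == 0 {
            return initial
        }
        if d := prev * 2; d < maxDelay {
            return d
        }
        return maxDelay
    }

    func main() {
        var d time.Duration
        for i := 0; i < 6; i++ {
            d = nextDelay(d)
            fmt.Println(d) // 500ms 1s 2s 4s 8s 16s — matching the records in this log
        }
    }
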
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:10:52 crc kubenswrapper[4689]: I0123 11:10:52.500798 4689 generic.go:334] "Generic (PLEG): container finished" podID="9926a3b2-8d65-4876-b56b-488948df1352" containerID="a3e8c566c58c5248ed2ec354fb36410fde0e3eb5ae246f762fd595401968f87a" exitCode=0 Jan 23 11:10:52 crc kubenswrapper[4689]: I0123 11:10:52.500898 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerDied","Data":"a3e8c566c58c5248ed2ec354fb36410fde0e3eb5ae246f762fd595401968f87a"} Jan 23 11:10:53 crc kubenswrapper[4689]: I0123 11:10:53.149041 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7dddff5c78-d9qgh" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" containerName="console" containerID="cri-o://1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d" gracePeriod=15 Jan 23 11:10:53 crc kubenswrapper[4689]: E0123 11:10:53.339268 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7525f02f_3c52_4e75_bace_0d3e1bedeee8.slice/crio-1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:10:53 crc kubenswrapper[4689]: I0123 11:10:53.927575 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: i/o timeout" Jan 23 11:10:54 crc kubenswrapper[4689]: I0123 11:10:54.523901 4689 generic.go:334] "Generic (PLEG): container finished" podID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerID="3a5a030927c7fe7bf3bf1502577e9aa21d3d89badd9e6419fdca14bb956b2bb3" exitCode=0 Jan 23 11:10:54 crc kubenswrapper[4689]: I0123 11:10:54.523982 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerDied","Data":"3a5a030927c7fe7bf3bf1502577e9aa21d3d89badd9e6419fdca14bb956b2bb3"} Jan 23 11:10:54 crc kubenswrapper[4689]: I0123 11:10:54.526445 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dddff5c78-d9qgh_7525f02f-3c52-4e75-bace-0d3e1bedeee8/console/0.log" Jan 23 11:10:54 crc kubenswrapper[4689]: I0123 11:10:54.526487 4689 generic.go:334] "Generic (PLEG): container finished" podID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" containerID="1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d" exitCode=2 Jan 23 11:10:54 crc kubenswrapper[4689]: I0123 11:10:54.526513 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dddff5c78-d9qgh" event={"ID":"7525f02f-3c52-4e75-bace-0d3e1bedeee8","Type":"ContainerDied","Data":"1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d"} Jan 23 11:10:55 crc kubenswrapper[4689]: I0123 11:10:55.091239 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: 
\"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:10:55 crc kubenswrapper[4689]: E0123 11:10:55.091566 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:10:55 crc kubenswrapper[4689]: E0123 11:10:55.091622 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:10:55 crc kubenswrapper[4689]: E0123 11:10:55.091688 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:11:03.09166442 +0000 UTC m=+1327.716344289 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.545716 4689 generic.go:334] "Generic (PLEG): container finished" podID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerID="7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770" exitCode=0 Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.549868 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerDied","Data":"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770"} Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.553789 4689 generic.go:334] "Generic (PLEG): container finished" podID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerID="8c5b9da8c95f2018a8bac1dadf51c7270e4e896fb8da259fd6208b603520d69d" exitCode=0 Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.554305 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerDied","Data":"8c5b9da8c95f2018a8bac1dadf51c7270e4e896fb8da259fd6208b603520d69d"} Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.556508 4689 generic.go:334] "Generic (PLEG): container finished" podID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerID="56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482" exitCode=0 Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.556615 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerDied","Data":"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482"} Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.558684 4689 generic.go:334] "Generic (PLEG): container finished" podID="126daef6-1490-45c1-898a-b51a0b069546" containerID="d16acc7bda60fa122cc4c7a5a7c8aa08193fdfa0635393cc61f64782683992c8" exitCode=0 Jan 23 11:10:56 crc kubenswrapper[4689]: I0123 11:10:56.558736 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerDied","Data":"d16acc7bda60fa122cc4c7a5a7c8aa08193fdfa0635393cc61f64782683992c8"} Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.131574 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.235467 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpd8f\" (UniqueName: \"kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f\") pod \"7b364f11-6505-4150-976d-5424c3f8b686\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.235523 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config\") pod \"7b364f11-6505-4150-976d-5424c3f8b686\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.235576 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb\") pod \"7b364f11-6505-4150-976d-5424c3f8b686\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.235726 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc\") pod \"7b364f11-6505-4150-976d-5424c3f8b686\" (UID: \"7b364f11-6505-4150-976d-5424c3f8b686\") " Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.243435 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f" (OuterVolumeSpecName: "kube-api-access-dpd8f") pod "7b364f11-6505-4150-976d-5424c3f8b686" (UID: "7b364f11-6505-4150-976d-5424c3f8b686"). InnerVolumeSpecName "kube-api-access-dpd8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.279118 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b364f11-6505-4150-976d-5424c3f8b686" (UID: "7b364f11-6505-4150-976d-5424c3f8b686"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.281748 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b364f11-6505-4150-976d-5424c3f8b686" (UID: "7b364f11-6505-4150-976d-5424c3f8b686"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.289033 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config" (OuterVolumeSpecName: "config") pod "7b364f11-6505-4150-976d-5424c3f8b686" (UID: "7b364f11-6505-4150-976d-5424c3f8b686"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.338462 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpd8f\" (UniqueName: \"kubernetes.io/projected/7b364f11-6505-4150-976d-5424c3f8b686-kube-api-access-dpd8f\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.338503 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.338534 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.338544 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b364f11-6505-4150-976d-5424c3f8b686-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.573173 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" event={"ID":"7b364f11-6505-4150-976d-5424c3f8b686","Type":"ContainerDied","Data":"59502ff006236e5a2640c197050136b13c7c1da9456f1fd7356170323c1d8956"} Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.573220 4689 scope.go:117] "RemoveContainer" containerID="676dc0fb6a00bde6fe2b2a221e1d773239b5d1a796c29a38a34f2b035fab356b" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.573316 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.615188 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.624099 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-cckbn"] Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.657685 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b364f11-6505-4150-976d-5424c3f8b686" path="/var/lib/kubelet/pods/7b364f11-6505-4150-976d-5424c3f8b686/volumes" Jan 23 11:10:57 crc kubenswrapper[4689]: E0123 11:10:57.856549 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Jan 23 11:10:57 crc kubenswrapper[4689]: E0123 11:10:57.856711 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n669h5d6h66chcch576h564h5b7h75h8h57dh65dh9bh9h5fch658h67bh676h68chc6h648h55bh578h577hb6h5ddh5dfh7bh56bhc5h96h57ch5cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ttxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-sb-0_openstack(5c276d1f-838f-4113-b343-18c150dfa59b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:57 crc kubenswrapper[4689]: E0123 11:10:57.857892 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:10:57 crc kubenswrapper[4689]: I0123 11:10:57.922459 4689 scope.go:117] "RemoveContainer" containerID="f5324cb131154b647b26f119828d0b67392e37cfa51c198b1eb3506a8880198a" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.238571 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.238972 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n569h598h659h5c5h58bh685h65bhfbh5h57dhdfh577h98hc7hcbh5b4h5c6h5c6h89hfch674h5bdh55h67ch59ch574h675hc7hbbhdh5dh555q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovs-rundir,ReadOnly:true,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:true,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c974g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-metrics-plmwq_openstack(833907d1-336e-417d-b362-bffe1f3521d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.240257 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-metrics-plmwq" podUID="833907d1-336e-417d-b362-bffe1f3521d3" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.323391 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.323519 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n5cbh5bbh595h89h85h5bdhb9h576hd7hb4h645h56h594h75h5d4h85h598hb5h688h5c9h5fchcch554h547h599h5h666hd7h659h54bh55bh549q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-98xcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(8245587e-b385-48bf-a684-2c72fedfb5d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.324647 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.375467 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sb9zz"] Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.380792 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.386972 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.592938 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sb9zz" 
event={"ID":"3a33baed-7a5b-44f9-b344-114919fa316b","Type":"ContainerStarted","Data":"f8359242de17f7452b8419360cfa43bb6bab61f2b81838e1519d3140fb026557"} Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.594924 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" event={"ID":"407dd7c8-3c4f-4345-a26e-29f59646bf5e","Type":"ContainerStarted","Data":"2aeaf67fc3e330c740bb7f14d654be28e557b8d35aa3645a07bc0edc5e43bac8"} Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.596922 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.596959 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovn-controller-metrics-plmwq" podUID="833907d1-336e-417d-b362-bffe1f3521d3" Jan 23 11:10:58 crc kubenswrapper[4689]: E0123 11:10:58.597397 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.610355 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dddff5c78-d9qgh_7525f02f-3c52-4e75-bace-0d3e1bedeee8/console/0.log" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.610465 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769117 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvbd8\" (UniqueName: \"kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769314 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769357 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769411 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769435 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.769496 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.770279 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.770308 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config" (OuterVolumeSpecName: "console-config") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.770370 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca" (OuterVolumeSpecName: "service-ca") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.770386 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle\") pod \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\" (UID: \"7525f02f-3c52-4e75-bace-0d3e1bedeee8\") " Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.770982 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.773647 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774223 4689 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774263 4689 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774275 4689 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774284 4689 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7525f02f-3c52-4e75-bace-0d3e1bedeee8-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774413 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8" (OuterVolumeSpecName: "kube-api-access-xvbd8") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "kube-api-access-xvbd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.774655 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "7525f02f-3c52-4e75-bace-0d3e1bedeee8" (UID: "7525f02f-3c52-4e75-bace-0d3e1bedeee8"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.876705 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvbd8\" (UniqueName: \"kubernetes.io/projected/7525f02f-3c52-4e75-bace-0d3e1bedeee8-kube-api-access-xvbd8\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.876777 4689 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.876788 4689 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7525f02f-3c52-4e75-bace-0d3e1bedeee8-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:10:58 crc kubenswrapper[4689]: I0123 11:10:58.928412 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5bf47b49b7-cckbn" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: i/o timeout" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.513895 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.609519 4689 generic.go:334] "Generic (PLEG): container finished" podID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerID="08ac6691028a298f51f6c0c1b8fa5123b4863d8f0c6476f72c42d29a12f697fa" exitCode=0 Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.609598 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" event={"ID":"407dd7c8-3c4f-4345-a26e-29f59646bf5e","Type":"ContainerDied","Data":"08ac6691028a298f51f6c0c1b8fa5123b4863d8f0c6476f72c42d29a12f697fa"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.619864 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerStarted","Data":"4e3411d8d7429c83dfdaa7e2793aa4c0dfcd7977a791ce5fb3f4967130172f84"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.620830 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.626699 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerStarted","Data":"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.627090 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.633846 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerStarted","Data":"bfd646fa019b71a59e5091a2a0fab6b2be77c77a25c89706baf6b038b6f7b2da"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.635038 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.639101 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerStarted","Data":"39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.643656 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dddff5c78-d9qgh_7525f02f-3c52-4e75-bace-0d3e1bedeee8/console/0.log" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.643802 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.688801 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerStarted","Data":"c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.688921 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dddff5c78-d9qgh" event={"ID":"7525f02f-3c52-4e75-bace-0d3e1bedeee8","Type":"ContainerDied","Data":"8930d72675bd847372ab585da2567cfc55b301bfbda38d8c141834eb0faffd65"} Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.688941 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerStarted","Data":"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf"} Jan 23 11:10:59 crc kubenswrapper[4689]: E0123 11:10:59.689847 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.690390 4689 scope.go:117] "RemoveContainer" containerID="1d3c4f56fa132b794c20a0fa78b85fcfe63ddb572740091b0c426c19d73fa68d" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.690787 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.757612 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=49.714288025 podStartE2EDuration="1m0.757590387s" podCreationTimestamp="2026-01-23 11:09:59 +0000 UTC" firstStartedPulling="2026-01-23 11:10:10.820390224 +0000 UTC m=+1275.445070083" lastFinishedPulling="2026-01-23 11:10:21.863692586 +0000 UTC m=+1286.488372445" observedRunningTime="2026-01-23 11:10:59.747476096 +0000 UTC m=+1324.372155975" watchObservedRunningTime="2026-01-23 11:10:59.757590387 +0000 UTC m=+1324.382270256" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.781454 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=49.736774554 podStartE2EDuration="1m0.78142841s" podCreationTimestamp="2026-01-23 11:09:59 +0000 UTC" firstStartedPulling="2026-01-23 11:10:10.812822145 +0000 UTC m=+1275.437502024" lastFinishedPulling="2026-01-23 11:10:21.857476021 +0000 UTC m=+1286.482155880" observedRunningTime="2026-01-23 11:10:59.723290395 +0000 UTC m=+1324.347970274" watchObservedRunningTime="2026-01-23 11:10:59.78142841 +0000 UTC m=+1324.406108269" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.810970 4689 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=40.618731999 podStartE2EDuration="1m0.810955306s" podCreationTimestamp="2026-01-23 11:09:59 +0000 UTC" firstStartedPulling="2026-01-23 11:10:01.566345694 +0000 UTC m=+1266.191025553" lastFinishedPulling="2026-01-23 11:10:21.758569001 +0000 UTC m=+1286.383248860" observedRunningTime="2026-01-23 11:10:59.807886289 +0000 UTC m=+1324.432566158" watchObservedRunningTime="2026-01-23 11:10:59.810955306 +0000 UTC m=+1324.435635165" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.848976 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=46.999560884 podStartE2EDuration="57.848954341s" podCreationTimestamp="2026-01-23 11:10:02 +0000 UTC" firstStartedPulling="2026-01-23 11:10:20.713005099 +0000 UTC m=+1285.337684958" lastFinishedPulling="2026-01-23 11:10:31.562398556 +0000 UTC m=+1296.187078415" observedRunningTime="2026-01-23 11:10:59.838419388 +0000 UTC m=+1324.463099247" watchObservedRunningTime="2026-01-23 11:10:59.848954341 +0000 UTC m=+1324.473634190" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.859327 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=50.296245549 podStartE2EDuration="58.859257017s" podCreationTimestamp="2026-01-23 11:10:01 +0000 UTC" firstStartedPulling="2026-01-23 11:10:22.886579082 +0000 UTC m=+1287.511258941" lastFinishedPulling="2026-01-23 11:10:31.44959055 +0000 UTC m=+1296.074270409" observedRunningTime="2026-01-23 11:10:59.857243347 +0000 UTC m=+1324.481923206" watchObservedRunningTime="2026-01-23 11:10:59.859257017 +0000 UTC m=+1324.483936876" Jan 23 11:10:59 crc kubenswrapper[4689]: I0123 11:10:59.905915 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=40.743923882 podStartE2EDuration="1m0.905897527s" podCreationTimestamp="2026-01-23 11:09:59 +0000 UTC" firstStartedPulling="2026-01-23 11:10:01.681681273 +0000 UTC m=+1266.306361132" lastFinishedPulling="2026-01-23 11:10:21.843654918 +0000 UTC m=+1286.468334777" observedRunningTime="2026-01-23 11:10:59.90157432 +0000 UTC m=+1324.526254179" watchObservedRunningTime="2026-01-23 11:10:59.905897527 +0000 UTC m=+1324.530577386" Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.513862 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.579975 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.697059 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" event={"ID":"407dd7c8-3c4f-4345-a26e-29f59646bf5e","Type":"ContainerStarted","Data":"ffb58886e789bcb1fe7c5aadfffcb241d0474c7187b7c9defa41caf9384dbea4"} Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.698099 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:11:00 crc kubenswrapper[4689]: E0123 11:11:00.703133 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.722661 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" podStartSLOduration=14.722637656 podStartE2EDuration="14.722637656s" podCreationTimestamp="2026-01-23 11:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:00.719626382 +0000 UTC m=+1325.344306251" watchObservedRunningTime="2026-01-23 11:11:00.722637656 +0000 UTC m=+1325.347317515" Jan 23 11:11:00 crc kubenswrapper[4689]: I0123 11:11:00.761260 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 23 11:11:01 crc kubenswrapper[4689]: I0123 11:11:01.474469 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 23 11:11:01 crc kubenswrapper[4689]: E0123 11:11:01.477178 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:11:01 crc kubenswrapper[4689]: I0123 11:11:01.518917 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 23 11:11:01 crc kubenswrapper[4689]: I0123 11:11:01.712837 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 23 11:11:01 crc kubenswrapper[4689]: E0123 11:11:01.714506 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:11:01 crc kubenswrapper[4689]: E0123 11:11:01.714744 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:11:01 crc kubenswrapper[4689]: I0123 11:11:01.759652 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 23 11:11:02 crc kubenswrapper[4689]: I0123 11:11:02.612187 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 23 11:11:02 crc kubenswrapper[4689]: I0123 11:11:02.612307 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 23 11:11:02 crc kubenswrapper[4689]: E0123 11:11:02.722706 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="8245587e-b385-48bf-a684-2c72fedfb5d6" Jan 23 11:11:02 crc 
kubenswrapper[4689]: E0123 11:11:02.722923 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:11:03 crc kubenswrapper[4689]: I0123 11:11:03.138337 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:11:03 crc kubenswrapper[4689]: E0123 11:11:03.138818 4689 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 11:11:03 crc kubenswrapper[4689]: E0123 11:11:03.138837 4689 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 11:11:03 crc kubenswrapper[4689]: E0123 11:11:03.138895 4689 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift podName:502e87fb-9e46-41c3-929e-c007018641db nodeName:}" failed. No retries permitted until 2026-01-23 11:11:19.138877336 +0000 UTC m=+1343.763557195 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift") pod "swift-storage-0" (UID: "502e87fb-9e46-41c3-929e-c007018641db") : configmap "swift-ring-files" not found Jan 23 11:11:03 crc kubenswrapper[4689]: E0123 11:11:03.731464 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="5c276d1f-838f-4113-b343-18c150dfa59b" Jan 23 11:11:04 crc kubenswrapper[4689]: I0123 11:11:04.202467 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 23 11:11:04 crc kubenswrapper[4689]: I0123 11:11:04.202846 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 23 11:11:05 crc kubenswrapper[4689]: I0123 11:11:05.288156 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output=< Jan 23 11:11:05 crc kubenswrapper[4689]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 23 11:11:05 crc kubenswrapper[4689]: > Jan 23 11:11:05 crc kubenswrapper[4689]: I0123 11:11:05.332027 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kfl9p" Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.398375 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.458018 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"] Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.458287 4689 
Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.458287 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-l6zc2" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="dnsmasq-dns" containerID="cri-o://9e0be4ceb870679d2d7a320424ff1f891fced4fefce8395b379944cea9ebb218" gracePeriod=10
Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.786655 4689 generic.go:334] "Generic (PLEG): container finished" podID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerID="9e0be4ceb870679d2d7a320424ff1f891fced4fefce8395b379944cea9ebb218" exitCode=0
Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.786702 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-l6zc2" event={"ID":"5ded0d37-bdeb-425a-bed5-c7130a590643","Type":"ContainerDied","Data":"9e0be4ceb870679d2d7a320424ff1f891fced4fefce8395b379944cea9ebb218"}
Jan 23 11:11:06 crc kubenswrapper[4689]: I0123 11:11:06.906204 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 23 11:11:07 crc kubenswrapper[4689]: I0123 11:11:07.030280 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 23 11:11:08 crc kubenswrapper[4689]: I0123 11:11:08.782125 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 23 11:11:08 crc kubenswrapper[4689]: I0123 11:11:08.898956 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 23 11:11:08 crc kubenswrapper[4689]: I0123 11:11:08.976878 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-l6zc2"
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.082060 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb\") pod \"5ded0d37-bdeb-425a-bed5-c7130a590643\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") "
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.082266 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wf7wt\" (UniqueName: \"kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt\") pod \"5ded0d37-bdeb-425a-bed5-c7130a590643\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") "
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.082297 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config\") pod \"5ded0d37-bdeb-425a-bed5-c7130a590643\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") "
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.082383 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc\") pod \"5ded0d37-bdeb-425a-bed5-c7130a590643\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") "
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.082403 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb\") pod \"5ded0d37-bdeb-425a-bed5-c7130a590643\" (UID: \"5ded0d37-bdeb-425a-bed5-c7130a590643\") "
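The termination sequence at the top of this block ("Killing container with a grace period ... gracePeriod=10", then ContainerDied with exitCode=0) is the standard graceful-shutdown contract: signal the container, give it up to the grace period to exit on its own, and only then force-kill it. A process-level Go sketch of that contract, assuming a locally started process; the kubelet itself delegates the kill to the CRI runtime (cri-o here) rather than signalling directly:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace sends SIGTERM, waits up to the grace period, and
// escalates to SIGKILL -- the contract behind gracePeriod=10 above.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period (exitCode=0 above)
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace expired: SIGKILL
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println("stop result:", stopWithGrace(cmd, 10*time.Second))
}
```

Here dnsmasq-dns exited cleanly within its 10s grace period, so the PLEG reports exitCode=0 and the volume teardown below can begin immediately.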
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.086647 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt" (OuterVolumeSpecName: "kube-api-access-wf7wt") pod "5ded0d37-bdeb-425a-bed5-c7130a590643" (UID: "5ded0d37-bdeb-425a-bed5-c7130a590643"). InnerVolumeSpecName "kube-api-access-wf7wt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.137645 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5ded0d37-bdeb-425a-bed5-c7130a590643" (UID: "5ded0d37-bdeb-425a-bed5-c7130a590643"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.137657 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5ded0d37-bdeb-425a-bed5-c7130a590643" (UID: "5ded0d37-bdeb-425a-bed5-c7130a590643"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.156672 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5ded0d37-bdeb-425a-bed5-c7130a590643" (UID: "5ded0d37-bdeb-425a-bed5-c7130a590643"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.157674 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config" (OuterVolumeSpecName: "config") pod "5ded0d37-bdeb-425a-bed5-c7130a590643" (UID: "5ded0d37-bdeb-425a-bed5-c7130a590643"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.184732 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wf7wt\" (UniqueName: \"kubernetes.io/projected/5ded0d37-bdeb-425a-bed5-c7130a590643-kube-api-access-wf7wt\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.184772 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.184785 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.184797 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.184809 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ded0d37-bdeb-425a-bed5-c7130a590643-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374330 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-16b3-account-create-update-m8m5d"] Jan 23 11:11:09 crc kubenswrapper[4689]: E0123 11:11:09.374772 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374788 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: E0123 11:11:09.374801 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" containerName="console" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374808 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" containerName="console" Jan 23 11:11:09 crc kubenswrapper[4689]: E0123 11:11:09.374827 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="init" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374832 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="init" Jan 23 11:11:09 crc kubenswrapper[4689]: E0123 11:11:09.374852 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="init" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374857 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="init" Jan 23 11:11:09 crc kubenswrapper[4689]: E0123 11:11:09.374874 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.374880 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.375096 4689 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="7b364f11-6505-4150-976d-5424c3f8b686" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.375116 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" containerName="dnsmasq-dns" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.375127 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" containerName="console" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.375905 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.379228 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.386523 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5x5bf"] Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.387918 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.400578 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5x5bf"] Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.422575 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-16b3-account-create-update-m8m5d"] Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.500339 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmbwc\" (UniqueName: \"kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.500628 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.500891 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzwxc\" (UniqueName: \"kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.501014 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.602529 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzwxc\" (UniqueName: \"kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: 
\"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.602577 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.602687 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmbwc\" (UniqueName: \"kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.602714 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.603398 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.603897 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.625022 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzwxc\" (UniqueName: \"kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc\") pod \"glance-16b3-account-create-update-m8m5d\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.625534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmbwc\" (UniqueName: \"kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc\") pod \"glance-db-create-5x5bf\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.704490 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.716840 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5x5bf"
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.826640 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sb9zz" event={"ID":"3a33baed-7a5b-44f9-b344-114919fa316b","Type":"ContainerStarted","Data":"bea69c7103a42c83b0e83524036422d2b3f3161ffaaab6cb4e5fb73ed6df8f9e"}
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.836022 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerStarted","Data":"1310290e165d150996358a466360c57a9de403355d235b889f236bd0ac425459"}
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.840244 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-l6zc2" event={"ID":"5ded0d37-bdeb-425a-bed5-c7130a590643","Type":"ContainerDied","Data":"05dfb70a3bf3e4bd6b549ca9e75d10e3c8c284f824a8769b7ff7845dc71d987d"}
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.840310 4689 scope.go:117] "RemoveContainer" containerID="9e0be4ceb870679d2d7a320424ff1f891fced4fefce8395b379944cea9ebb218"
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.840492 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-l6zc2"
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.852964 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-sb9zz" podStartSLOduration=12.563909449 podStartE2EDuration="22.852948066s" podCreationTimestamp="2026-01-23 11:10:47 +0000 UTC" firstStartedPulling="2026-01-23 11:10:58.380594301 +0000 UTC m=+1323.005274160" lastFinishedPulling="2026-01-23 11:11:08.669632908 +0000 UTC m=+1333.294312777" observedRunningTime="2026-01-23 11:11:09.850732921 +0000 UTC m=+1334.475412780" watchObservedRunningTime="2026-01-23 11:11:09.852948066 +0000 UTC m=+1334.477627915"
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.885941 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"]
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.894000 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-l6zc2"]
Jan 23 11:11:09 crc kubenswrapper[4689]: I0123 11:11:09.896628 4689 scope.go:117] "RemoveContainer" containerID="17ac550e891ed1780556e8d43714f65a66e3b79c9cafa1f2e27eaef940d51793"
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.270114 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-16b3-account-create-update-m8m5d"]
Jan 23 11:11:10 crc kubenswrapper[4689]: W0123 11:11:10.276748 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4c15870_4970_4b10_b1fb_ecd61c9d341b.slice/crio-b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba WatchSource:0}: Error finding container b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba: Status 404 returned error can't find the container with id b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba
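The pod_startup_latency_tracker record for swift-ring-rebalance-sb9zz above contains everything needed to reconstruct its two durations: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (22.852948066s), while podStartSLOduration additionally excludes the image-pull window, lastFinishedPulling minus firstStartedPulling taken from the monotonic m=+ offsets (about 10.289s), leaving 12.563909449s. A short Go check of that arithmetic, using only values copied from the record; the field names come from the log, the computation is the illustration:

```go
package main

import (
	"fmt"
	"time"
)

// Reconstructing the swift-ring-rebalance-sb9zz numbers above.
func main() {
	// podCreationTimestamp and watchObservedRunningTime (wall clock).
	created := time.Date(2026, 1, 23, 11, 10, 47, 0, time.UTC)
	watchObservedRunning := time.Date(2026, 1, 23, 11, 11, 9, 852948066, time.UTC)

	// firstStartedPulling / lastFinishedPulling as monotonic m=+ offsets.
	firstStartedPulling := 1323.005274160
	lastFinishedPulling := 1333.294312777

	e2e := watchObservedRunning.Sub(created)
	pull := time.Duration((lastFinishedPulling - firstStartedPulling) * float64(time.Second))
	slo := e2e - pull

	fmt.Println("podStartE2EDuration:", e2e)  // 22.852948066s
	fmt.Println("image pull window:  ", pull) // ~10.289038617s
	fmt.Println("podStartSLOduration:", slo)  // ~12.563909449s
}
```

The dnsmasq and glance records elsewhere in this log show the degenerate case: when nothing was pulled, both pulling timestamps are the zero time ("0001-01-01 00:00:00") and the SLO duration equals the end-to-end duration.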
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.308632 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output=<
Jan 23 11:11:10 crc kubenswrapper[4689]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 23 11:11:10 crc kubenswrapper[4689]: >
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.353047 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kfl9p"
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.403529 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5x5bf"]
Jan 23 11:11:10 crc kubenswrapper[4689]: W0123 11:11:10.486338 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85404a8c_170b_4e87_9f31_c584c1cd479d.slice/crio-408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0 WatchSource:0}: Error finding container 408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0: Status 404 returned error can't find the container with id 408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.850081 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5x5bf" event={"ID":"85404a8c-170b-4e87-9f31-c584c1cd479d","Type":"ContainerStarted","Data":"ec07cf8555d0a386dbe82e4c2dfbb4baa36cc85c76c9efff248e2a93512d1b9f"}
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.850138 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5x5bf" event={"ID":"85404a8c-170b-4e87-9f31-c584c1cd479d","Type":"ContainerStarted","Data":"408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0"}
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.851628 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-16b3-account-create-update-m8m5d" event={"ID":"a4c15870-4970-4b10-b1fb-ecd61c9d341b","Type":"ContainerStarted","Data":"b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba"}
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.866631 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-5x5bf" podStartSLOduration=1.8666109739999999 podStartE2EDuration="1.866610974s" podCreationTimestamp="2026-01-23 11:11:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:10.862043379 +0000 UTC m=+1335.486723238" watchObservedRunningTime="2026-01-23 11:11:10.866610974 +0000 UTC m=+1335.491290833"
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.920471 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.127:5671: connect: connection refused"
Jan 23 11:11:10 crc kubenswrapper[4689]: I0123 11:11:10.976821 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused"
Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.237982 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-lztn4"]
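The rabbitmq probe failures above ("dial tcp 10.217.0.127:5671: connect: connection refused") are TCP-socket readiness probes: the kubelet dials the pod IP and port and marks the probe failed when the connection is refused, which is expected while the broker is still starting. A minimal Go sketch of such a check; the address is copied from the log, and this illustrates a tcpSocket-style probe rather than reproducing the kubelet's prober:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// probeTCP reports ready when a TCP connection to addr succeeds within
// the timeout -- the check behind the rabbitmq records above.
func probeTCP(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		// e.g. "dial tcp 10.217.0.127:5671: connect: connection refused"
		return err
	}
	return conn.Close()
}

func main() {
	if err := probeTCP("10.217.0.127:5671", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
		return
	}
	fmt.Println("ready")
}
```

The ovn-controller failure at the top of this block is the other probe flavor: an exec probe whose stdout/stderr ("ERROR - ovn-controller connection status ...") the kubelet captures verbatim in the multi-line output=< ... > field.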
Need to start a new one" pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.241984 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.253543 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.288812 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lztn4"] Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.339906 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxxmm\" (UniqueName: \"kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.340092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.347840 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.441531 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxxmm\" (UniqueName: \"kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.441616 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.442257 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.473550 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxxmm\" (UniqueName: \"kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm\") pod \"root-account-create-update-lztn4\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 
11:11:11.616521 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.655172 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ded0d37-bdeb-425a-bed5-c7130a590643" path="/var/lib/kubelet/pods/5ded0d37-bdeb-425a-bed5-c7130a590643/volumes" Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.897946 4689 generic.go:334] "Generic (PLEG): container finished" podID="a4c15870-4970-4b10-b1fb-ecd61c9d341b" containerID="a1b5137c2ed5bba3732515cce96013d48c40bcbec246de59219540e533ecfe0e" exitCode=0 Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.898022 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-16b3-account-create-update-m8m5d" event={"ID":"a4c15870-4970-4b10-b1fb-ecd61c9d341b","Type":"ContainerDied","Data":"a1b5137c2ed5bba3732515cce96013d48c40bcbec246de59219540e533ecfe0e"} Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.915352 4689 generic.go:334] "Generic (PLEG): container finished" podID="85404a8c-170b-4e87-9f31-c584c1cd479d" containerID="ec07cf8555d0a386dbe82e4c2dfbb4baa36cc85c76c9efff248e2a93512d1b9f" exitCode=0 Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.915419 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5x5bf" event={"ID":"85404a8c-170b-4e87-9f31-c584c1cd479d","Type":"ContainerDied","Data":"ec07cf8555d0a386dbe82e4c2dfbb4baa36cc85c76c9efff248e2a93512d1b9f"} Jan 23 11:11:11 crc kubenswrapper[4689]: I0123 11:11:11.928361 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerStarted","Data":"70db53e6882bea47c8e8cf0319e00077420a0150cde8fb9fb2c5406c1c9d487b"} Jan 23 11:11:12 crc kubenswrapper[4689]: W0123 11:11:12.117372 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e8c81b8_389e_4d94_a1c6_6f335d468419.slice/crio-e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295 WatchSource:0}: Error finding container e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295: Status 404 returned error can't find the container with id e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295 Jan 23 11:11:12 crc kubenswrapper[4689]: I0123 11:11:12.128046 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lztn4"] Jan 23 11:11:12 crc kubenswrapper[4689]: I0123 11:11:12.938874 4689 generic.go:334] "Generic (PLEG): container finished" podID="9e8c81b8-389e-4d94-a1c6-6f335d468419" containerID="fa194d92b4d55899383f8e8fd404a9d907f9d724fd04054efc3c783551079984" exitCode=0 Jan 23 11:11:12 crc kubenswrapper[4689]: I0123 11:11:12.938957 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lztn4" event={"ID":"9e8c81b8-389e-4d94-a1c6-6f335d468419","Type":"ContainerDied","Data":"fa194d92b4d55899383f8e8fd404a9d907f9d724fd04054efc3c783551079984"} Jan 23 11:11:12 crc kubenswrapper[4689]: I0123 11:11:12.939022 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lztn4" event={"ID":"9e8c81b8-389e-4d94-a1c6-6f335d468419","Type":"ContainerStarted","Data":"e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295"} Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.468443 4689 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.580533 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.596595 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmbwc\" (UniqueName: \"kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc\") pod \"85404a8c-170b-4e87-9f31-c584c1cd479d\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.596709 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts\") pod \"85404a8c-170b-4e87-9f31-c584c1cd479d\" (UID: \"85404a8c-170b-4e87-9f31-c584c1cd479d\") " Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.598071 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "85404a8c-170b-4e87-9f31-c584c1cd479d" (UID: "85404a8c-170b-4e87-9f31-c584c1cd479d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.603532 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc" (OuterVolumeSpecName: "kube-api-access-vmbwc") pod "85404a8c-170b-4e87-9f31-c584c1cd479d" (UID: "85404a8c-170b-4e87-9f31-c584c1cd479d"). InnerVolumeSpecName "kube-api-access-vmbwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.702226 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts\") pod \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.702320 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzwxc\" (UniqueName: \"kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc\") pod \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\" (UID: \"a4c15870-4970-4b10-b1fb-ecd61c9d341b\") " Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.702738 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmbwc\" (UniqueName: \"kubernetes.io/projected/85404a8c-170b-4e87-9f31-c584c1cd479d-kube-api-access-vmbwc\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.702760 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85404a8c-170b-4e87-9f31-c584c1cd479d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.704599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4c15870-4970-4b10-b1fb-ecd61c9d341b" (UID: "a4c15870-4970-4b10-b1fb-ecd61c9d341b"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.717618 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc" (OuterVolumeSpecName: "kube-api-access-gzwxc") pod "a4c15870-4970-4b10-b1fb-ecd61c9d341b" (UID: "a4c15870-4970-4b10-b1fb-ecd61c9d341b"). InnerVolumeSpecName "kube-api-access-gzwxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.731408 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-drbfg"] Jan 23 11:11:13 crc kubenswrapper[4689]: E0123 11:11:13.732011 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c15870-4970-4b10-b1fb-ecd61c9d341b" containerName="mariadb-account-create-update" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.732030 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c15870-4970-4b10-b1fb-ecd61c9d341b" containerName="mariadb-account-create-update" Jan 23 11:11:13 crc kubenswrapper[4689]: E0123 11:11:13.732044 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85404a8c-170b-4e87-9f31-c584c1cd479d" containerName="mariadb-database-create" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.732051 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="85404a8c-170b-4e87-9f31-c584c1cd479d" containerName="mariadb-database-create" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.732289 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c15870-4970-4b10-b1fb-ecd61c9d341b" containerName="mariadb-account-create-update" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.732307 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="85404a8c-170b-4e87-9f31-c584c1cd479d" containerName="mariadb-database-create" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.742588 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.750999 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6c5d-account-create-update-djhds"] Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.752487 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.755491 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.764948 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-drbfg"] Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.777222 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c5d-account-create-update-djhds"] Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.819960 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brm7j\" (UniqueName: \"kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.820137 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.820486 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c15870-4970-4b10-b1fb-ecd61c9d341b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.820506 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzwxc\" (UniqueName: \"kubernetes.io/projected/a4c15870-4970-4b10-b1fb-ecd61c9d341b-kube-api-access-gzwxc\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.924723 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.925269 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brm7j\" (UniqueName: \"kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.925469 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.925515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72fmg\" (UniqueName: \"kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: 
\"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.941046 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.950120 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brm7j\" (UniqueName: \"kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j\") pod \"keystone-db-create-drbfg\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.982524 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5x5bf" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.982620 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5x5bf" event={"ID":"85404a8c-170b-4e87-9f31-c584c1cd479d","Type":"ContainerDied","Data":"408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0"} Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.982654 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="408f7151f13d9bacf8147aca4b32449afbbfc639d59096d8aa3c3d6cbe70dcf0" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.990261 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-16b3-account-create-update-m8m5d" Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.990574 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-16b3-account-create-update-m8m5d" event={"ID":"a4c15870-4970-4b10-b1fb-ecd61c9d341b","Type":"ContainerDied","Data":"b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba"} Jan 23 11:11:13 crc kubenswrapper[4689]: I0123 11:11:13.990640 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3a12bc17b0bc288c078a26d9c730f4791d716b9806f2ac3820595f78129e5ba" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.028212 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72fmg\" (UniqueName: \"kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.028542 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.029572 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " 
pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.031129 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-hzwkd"] Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.033092 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.065973 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-hzwkd"] Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.067268 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72fmg\" (UniqueName: \"kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg\") pod \"keystone-6c5d-account-create-update-djhds\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.086428 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-05a3-account-create-update-8sl6r"] Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.088188 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.090981 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.107081 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-05a3-account-create-update-8sl6r"] Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.117962 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.132459 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.132723 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mh59\" (UniqueName: \"kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.133397 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.133526 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzdrg\" (UniqueName: \"kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.175375 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.235243 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzdrg\" (UniqueName: \"kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.235378 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.235460 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mh59\" (UniqueName: \"kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.235502 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.236298 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.237588 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.256082 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mh59\" (UniqueName: \"kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59\") pod \"placement-db-create-hzwkd\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.256254 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzdrg\" (UniqueName: \"kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg\") pod \"placement-05a3-account-create-update-8sl6r\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.380865 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.422651 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.661427 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.746568 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxxmm\" (UniqueName: \"kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm\") pod \"9e8c81b8-389e-4d94-a1c6-6f335d468419\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.746707 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts\") pod \"9e8c81b8-389e-4d94-a1c6-6f335d468419\" (UID: \"9e8c81b8-389e-4d94-a1c6-6f335d468419\") " Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.747284 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9e8c81b8-389e-4d94-a1c6-6f335d468419" (UID: "9e8c81b8-389e-4d94-a1c6-6f335d468419"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.747678 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e8c81b8-389e-4d94-a1c6-6f335d468419-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.752147 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm" (OuterVolumeSpecName: "kube-api-access-sxxmm") pod "9e8c81b8-389e-4d94-a1c6-6f335d468419" (UID: "9e8c81b8-389e-4d94-a1c6-6f335d468419"). InnerVolumeSpecName "kube-api-access-sxxmm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.752823 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-drbfg"] Jan 23 11:11:14 crc kubenswrapper[4689]: W0123 11:11:14.755799 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod919c9b5f_f82f_45c9_a6df_7193a6a7e6f0.slice/crio-6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79 WatchSource:0}: Error finding container 6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79: Status 404 returned error can't find the container with id 6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79 Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.850059 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxxmm\" (UniqueName: \"kubernetes.io/projected/9e8c81b8-389e-4d94-a1c6-6f335d468419-kube-api-access-sxxmm\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:14 crc kubenswrapper[4689]: I0123 11:11:14.902690 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c5d-account-create-update-djhds"] Jan 23 11:11:14 crc kubenswrapper[4689]: W0123 11:11:14.918602 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e16c499_f349_4cd0_bbe9_85524b31ba67.slice/crio-d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad WatchSource:0}: Error finding container d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad: Status 404 returned error can't find the container with id d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.018858 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-drbfg" event={"ID":"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0","Type":"ContainerStarted","Data":"cf98ae34220f17583994c83ab86552defca6675eb410b1464b4b6628a926baf7"} Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.018903 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-drbfg" event={"ID":"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0","Type":"ContainerStarted","Data":"6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79"} Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.023190 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lztn4" event={"ID":"9e8c81b8-389e-4d94-a1c6-6f335d468419","Type":"ContainerDied","Data":"e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295"} Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.023262 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e73728efb97b7ba076326008bbb5981fb70bda0e68ba67c8531cf9429b40a295" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.023367 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lztn4" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.028053 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c5d-account-create-update-djhds" event={"ID":"7e16c499-f349-4cd0-bbe9-85524b31ba67","Type":"ContainerStarted","Data":"d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad"} Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.043129 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-drbfg" podStartSLOduration=2.043108696 podStartE2EDuration="2.043108696s" podCreationTimestamp="2026-01-23 11:11:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:15.036349127 +0000 UTC m=+1339.661028986" watchObservedRunningTime="2026-01-23 11:11:15.043108696 +0000 UTC m=+1339.667788565" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.125281 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-hzwkd"] Jan 23 11:11:15 crc kubenswrapper[4689]: W0123 11:11:15.132711 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3d7001f_8caf_4867_bfec_1e60eaeefad8.slice/crio-b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365 WatchSource:0}: Error finding container b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365: Status 404 returned error can't find the container with id b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365 Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.134693 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-05a3-account-create-update-8sl6r"] Jan 23 11:11:15 crc kubenswrapper[4689]: W0123 11:11:15.139918 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a9df33f_1781_47c3_b4dc_0f21bdf88a05.slice/crio-ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc WatchSource:0}: Error finding container ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc: Status 404 returned error can't find the container with id ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.299981 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output=< Jan 23 11:11:15 crc kubenswrapper[4689]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 23 11:11:15 crc kubenswrapper[4689]: > Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.965176 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-nxlln"] Jan 23 11:11:15 crc kubenswrapper[4689]: E0123 11:11:15.966006 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e8c81b8-389e-4d94-a1c6-6f335d468419" containerName="mariadb-account-create-update" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.966028 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e8c81b8-389e-4d94-a1c6-6f335d468419" containerName="mariadb-account-create-update" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.966353 4689 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9e8c81b8-389e-4d94-a1c6-6f335d468419" containerName="mariadb-account-create-update" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.967333 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.981926 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w9xp\" (UniqueName: \"kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.982189 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:15 crc kubenswrapper[4689]: I0123 11:11:15.983583 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-nxlln"] Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.048259 4689 generic.go:334] "Generic (PLEG): container finished" podID="919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" containerID="cf98ae34220f17583994c83ab86552defca6675eb410b1464b4b6628a926baf7" exitCode=0 Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.048478 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-drbfg" event={"ID":"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0","Type":"ContainerDied","Data":"cf98ae34220f17583994c83ab86552defca6675eb410b1464b4b6628a926baf7"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.062129 4689 generic.go:334] "Generic (PLEG): container finished" podID="7e16c499-f349-4cd0-bbe9-85524b31ba67" containerID="ee8739c24f5a4e378d2976ff5191d8d9ed0806cda37799d60bc6057e6dd6029f" exitCode=0 Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.062209 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c5d-account-create-update-djhds" event={"ID":"7e16c499-f349-4cd0-bbe9-85524b31ba67","Type":"ContainerDied","Data":"ee8739c24f5a4e378d2976ff5191d8d9ed0806cda37799d60bc6057e6dd6029f"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.063650 4689 generic.go:334] "Generic (PLEG): container finished" podID="f3d7001f-8caf-4867-bfec-1e60eaeefad8" containerID="315bac6a83cba198f9e00863f10cca377df580366bb3193f55b4f2da49e4ca4f" exitCode=0 Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.063801 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hzwkd" event={"ID":"f3d7001f-8caf-4867-bfec-1e60eaeefad8","Type":"ContainerDied","Data":"315bac6a83cba198f9e00863f10cca377df580366bb3193f55b4f2da49e4ca4f"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.063820 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hzwkd" event={"ID":"f3d7001f-8caf-4867-bfec-1e60eaeefad8","Type":"ContainerStarted","Data":"b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.064995 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-plmwq" 
event={"ID":"833907d1-336e-417d-b362-bffe1f3521d3","Type":"ContainerStarted","Data":"3293e914437a0fa926fc474beffaaea52600ba8717ac21e1546f560bc9243786"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.068446 4689 generic.go:334] "Generic (PLEG): container finished" podID="7a9df33f-1781-47c3-b4dc-0f21bdf88a05" containerID="5df8b086d4acc5fc2d31e8db12fc7195ff53936f2e6047c9d9e81ff71ee2bf4a" exitCode=0 Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.068510 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-05a3-account-create-update-8sl6r" event={"ID":"7a9df33f-1781-47c3-b4dc-0f21bdf88a05","Type":"ContainerDied","Data":"5df8b086d4acc5fc2d31e8db12fc7195ff53936f2e6047c9d9e81ff71ee2bf4a"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.068539 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-05a3-account-create-update-8sl6r" event={"ID":"7a9df33f-1781-47c3-b4dc-0f21bdf88a05","Type":"ContainerStarted","Data":"ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc"} Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.084132 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.085126 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w9xp\" (UniqueName: \"kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.085201 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.108714 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w9xp\" (UniqueName: \"kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp\") pod \"mysqld-exporter-openstack-db-create-nxlln\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.172110 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-2f06-account-create-update-7hlnh"] Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.173079 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-plmwq" podStartSLOduration=3.357750704 podStartE2EDuration="43.173065036s" podCreationTimestamp="2026-01-23 11:10:33 +0000 UTC" firstStartedPulling="2026-01-23 11:10:34.747401322 +0000 UTC m=+1299.372081181" lastFinishedPulling="2026-01-23 11:11:14.562715654 +0000 UTC m=+1339.187395513" observedRunningTime="2026-01-23 11:11:16.169226981 +0000 UTC m=+1340.793906840" watchObservedRunningTime="2026-01-23 11:11:16.173065036 +0000 UTC m=+1340.797744895" Jan 
23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.173778 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.176675 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.187383 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vhr2\" (UniqueName: \"kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.187491 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.215084 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-2f06-account-create-update-7hlnh"] Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.292529 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.295759 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.295993 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vhr2\" (UniqueName: \"kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.297010 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.333806 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vhr2\" (UniqueName: \"kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2\") pod \"mysqld-exporter-2f06-account-create-update-7hlnh\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:16 crc kubenswrapper[4689]: I0123 11:11:16.507689 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.088310 4689 generic.go:334] "Generic (PLEG): container finished" podID="3a33baed-7a5b-44f9-b344-114919fa316b" containerID="bea69c7103a42c83b0e83524036422d2b3f3161ffaaab6cb4e5fb73ed6df8f9e" exitCode=0 Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.088533 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sb9zz" event={"ID":"3a33baed-7a5b-44f9-b344-114919fa316b","Type":"ContainerDied","Data":"bea69c7103a42c83b0e83524036422d2b3f3161ffaaab6cb4e5fb73ed6df8f9e"} Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.528263 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lztn4"] Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.537490 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.539573 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-lztn4"] Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.629377 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mh59\" (UniqueName: \"kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59\") pod \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.629717 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts\") pod \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\" (UID: \"f3d7001f-8caf-4867-bfec-1e60eaeefad8\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.631066 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f3d7001f-8caf-4867-bfec-1e60eaeefad8" (UID: "f3d7001f-8caf-4867-bfec-1e60eaeefad8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.637382 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59" (OuterVolumeSpecName: "kube-api-access-9mh59") pod "f3d7001f-8caf-4867-bfec-1e60eaeefad8" (UID: "f3d7001f-8caf-4867-bfec-1e60eaeefad8"). InnerVolumeSpecName "kube-api-access-9mh59". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.654501 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e8c81b8-389e-4d94-a1c6-6f335d468419" path="/var/lib/kubelet/pods/9e8c81b8-389e-4d94-a1c6-6f335d468419/volumes" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.723277 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.732346 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mh59\" (UniqueName: \"kubernetes.io/projected/f3d7001f-8caf-4867-bfec-1e60eaeefad8-kube-api-access-9mh59\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.732378 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d7001f-8caf-4867-bfec-1e60eaeefad8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.743021 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.753731 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:17 crc kubenswrapper[4689]: W0123 11:11:17.813001 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7167d100_f446_4745_b762_16d355cf4b9c.slice/crio-7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324 WatchSource:0}: Error finding container 7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324: Status 404 returned error can't find the container with id 7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324 Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.814078 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-nxlln"] Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.835243 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brm7j\" (UniqueName: \"kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j\") pod \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.835644 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts\") pod \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\" (UID: \"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.836221 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" (UID: "919c9b5f-f82f-45c9-a6df-7193a6a7e6f0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.868466 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.870435 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j" (OuterVolumeSpecName: "kube-api-access-brm7j") pod "919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" (UID: "919c9b5f-f82f-45c9-a6df-7193a6a7e6f0"). 
InnerVolumeSpecName "kube-api-access-brm7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.970870 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzdrg\" (UniqueName: \"kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg\") pod \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.971013 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72fmg\" (UniqueName: \"kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg\") pod \"7e16c499-f349-4cd0-bbe9-85524b31ba67\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.971088 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts\") pod \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\" (UID: \"7a9df33f-1781-47c3-b4dc-0f21bdf88a05\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.971192 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts\") pod \"7e16c499-f349-4cd0-bbe9-85524b31ba67\" (UID: \"7e16c499-f349-4cd0-bbe9-85524b31ba67\") " Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.971830 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brm7j\" (UniqueName: \"kubernetes.io/projected/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0-kube-api-access-brm7j\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.972168 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7e16c499-f349-4cd0-bbe9-85524b31ba67" (UID: "7e16c499-f349-4cd0-bbe9-85524b31ba67"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.973746 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a9df33f-1781-47c3-b4dc-0f21bdf88a05" (UID: "7a9df33f-1781-47c3-b4dc-0f21bdf88a05"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.974730 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg" (OuterVolumeSpecName: "kube-api-access-72fmg") pod "7e16c499-f349-4cd0-bbe9-85524b31ba67" (UID: "7e16c499-f349-4cd0-bbe9-85524b31ba67"). InnerVolumeSpecName "kube-api-access-72fmg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.975923 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-2f06-account-create-update-7hlnh"] Jan 23 11:11:17 crc kubenswrapper[4689]: I0123 11:11:17.976202 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg" (OuterVolumeSpecName: "kube-api-access-zzdrg") pod "7a9df33f-1781-47c3-b4dc-0f21bdf88a05" (UID: "7a9df33f-1781-47c3-b4dc-0f21bdf88a05"). InnerVolumeSpecName "kube-api-access-zzdrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:17 crc kubenswrapper[4689]: W0123 11:11:17.981609 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02b22380_5105_4302_a086_e75dd58e63e5.slice/crio-2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b WatchSource:0}: Error finding container 2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b: Status 404 returned error can't find the container with id 2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.073434 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e16c499-f349-4cd0-bbe9-85524b31ba67-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.073640 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzdrg\" (UniqueName: \"kubernetes.io/projected/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-kube-api-access-zzdrg\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.073654 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72fmg\" (UniqueName: \"kubernetes.io/projected/7e16c499-f349-4cd0-bbe9-85524b31ba67-kube-api-access-72fmg\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.073664 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9df33f-1781-47c3-b4dc-0f21bdf88a05-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.097567 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c5d-account-create-update-djhds" event={"ID":"7e16c499-f349-4cd0-bbe9-85524b31ba67","Type":"ContainerDied","Data":"d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.097620 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c5d-account-create-update-djhds" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.097633 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7efb1a850c6003ac11ee696d43ecd6c58339b6992121f26c7088ba963db40ad" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.100628 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hzwkd" event={"ID":"f3d7001f-8caf-4867-bfec-1e60eaeefad8","Type":"ContainerDied","Data":"b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.100695 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0b3f5e71c93e8d1425db98fc2d95c78bec17da77231b6eec6ac2fe7c720f365" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.100665 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hzwkd" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.102220 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5c276d1f-838f-4113-b343-18c150dfa59b","Type":"ContainerStarted","Data":"b4fe8fddd7af95107533ac84ee2607cd8a314cca11596acd944dda79fa76ad07"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.103626 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-05a3-account-create-update-8sl6r" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.103710 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-05a3-account-create-update-8sl6r" event={"ID":"7a9df33f-1781-47c3-b4dc-0f21bdf88a05","Type":"ContainerDied","Data":"ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.103729 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba99f40092b7062e175aa612755092d8609d6f5a01d7f8d652fa6b5dcf7dd3fc" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.105507 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8245587e-b385-48bf-a684-2c72fedfb5d6","Type":"ContainerStarted","Data":"6b586b4815d5c0bc5e705435d2f451b92d2e342ffc41572ff15593caa86ec445"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.111448 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" event={"ID":"02b22380-5105-4302-a086-e75dd58e63e5","Type":"ContainerStarted","Data":"2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.114771 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerStarted","Data":"9771dbc496627ce3796b50ee2d64ab65bf1bbab3a1b1639171333ea70927098d"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.122201 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" event={"ID":"7167d100-f446-4745-b762-16d355cf4b9c","Type":"ContainerStarted","Data":"3481f853370372acd70a380420bb978c71911e6cd833279f2a67bb92a5b07161"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.122249 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" 
event={"ID":"7167d100-f446-4745-b762-16d355cf4b9c","Type":"ContainerStarted","Data":"7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.129223 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-drbfg" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.129260 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-drbfg" event={"ID":"919c9b5f-f82f-45c9-a6df-7193a6a7e6f0","Type":"ContainerDied","Data":"6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79"} Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.129291 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c9eb7008fd97168aba62c3929414233572fcad28a7961403f06837315385f79" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.131759 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=57.916573169 podStartE2EDuration="1m6.131737484s" podCreationTimestamp="2026-01-23 11:10:12 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.454826818 +0000 UTC m=+1288.079506677" lastFinishedPulling="2026-01-23 11:10:31.669991133 +0000 UTC m=+1296.294670992" observedRunningTime="2026-01-23 11:11:18.124020451 +0000 UTC m=+1342.748700310" watchObservedRunningTime="2026-01-23 11:11:18.131737484 +0000 UTC m=+1342.756417343" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.204665 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" podStartSLOduration=3.204645157 podStartE2EDuration="3.204645157s" podCreationTimestamp="2026-01-23 11:11:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:18.185507571 +0000 UTC m=+1342.810187430" watchObservedRunningTime="2026-01-23 11:11:18.204645157 +0000 UTC m=+1342.829325016" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.214062 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=62.002825105 podStartE2EDuration="1m10.214042941s" podCreationTimestamp="2026-01-23 11:10:08 +0000 UTC" firstStartedPulling="2026-01-23 11:10:24.064761962 +0000 UTC m=+1288.689441821" lastFinishedPulling="2026-01-23 11:10:32.275979798 +0000 UTC m=+1296.900659657" observedRunningTime="2026-01-23 11:11:18.164295623 +0000 UTC m=+1342.788975482" watchObservedRunningTime="2026-01-23 11:11:18.214042941 +0000 UTC m=+1342.838722810" Jan 23 11:11:18 crc kubenswrapper[4689]: I0123 11:11:18.219202 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=19.029016492 podStartE2EDuration="1m13.219187669s" podCreationTimestamp="2026-01-23 11:10:05 +0000 UTC" firstStartedPulling="2026-01-23 11:10:23.013413136 +0000 UTC m=+1287.638092995" lastFinishedPulling="2026-01-23 11:11:17.203584313 +0000 UTC m=+1341.828264172" observedRunningTime="2026-01-23 11:11:18.208715459 +0000 UTC m=+1342.833395318" watchObservedRunningTime="2026-01-23 11:11:18.219187669 +0000 UTC m=+1342.843867548" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.410510 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s4nwf-config-vq8mx"] Jan 23 11:11:19 crc kubenswrapper[4689]: E0123 11:11:18.410895 4689 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e16c499-f349-4cd0-bbe9-85524b31ba67" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.410905 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e16c499-f349-4cd0-bbe9-85524b31ba67" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: E0123 11:11:18.410928 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.410934 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: E0123 11:11:18.410950 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d7001f-8caf-4867-bfec-1e60eaeefad8" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.410956 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d7001f-8caf-4867-bfec-1e60eaeefad8" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: E0123 11:11:18.410970 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a9df33f-1781-47c3-b4dc-0f21bdf88a05" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.410976 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a9df33f-1781-47c3-b4dc-0f21bdf88a05" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.411265 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e16c499-f349-4cd0-bbe9-85524b31ba67" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.411281 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3d7001f-8caf-4867-bfec-1e60eaeefad8" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.411294 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a9df33f-1781-47c3-b4dc-0f21bdf88a05" containerName="mariadb-account-create-update" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.411311 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" containerName="mariadb-database-create" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.411966 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.427334 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.438582 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf-config-vq8mx"] Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484356 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484419 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484469 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbnhd\" (UniqueName: \"kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484522 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484591 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.484663 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.570062 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.589892 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.590031 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.592856 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.592965 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.593103 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.594463 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596534 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596598 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596684 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbnhd\" (UniqueName: \"kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596704 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596797 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.596919 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts\") pod 
\"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.597371 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.600695 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-mcjz2" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.602049 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.602117 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.604193 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.624715 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbnhd\" (UniqueName: \"kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd\") pod \"ovn-controller-s4nwf-config-vq8mx\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") " pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699527 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw6c6\" (UniqueName: \"kubernetes.io/projected/43e3f941-da86-4f2a-80ea-24d29e55acb3-kube-api-access-rw6c6\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699624 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699644 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-scripts\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699807 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " 
pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699852 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699892 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.699944 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-config\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.717193 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.760606 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804264 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804343 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804386 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804456 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804527 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804574 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf\") pod 
\"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.804673 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9w2m\" (UniqueName: \"kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m\") pod \"3a33baed-7a5b-44f9-b344-114919fa316b\" (UID: \"3a33baed-7a5b-44f9-b344-114919fa316b\") " Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.807274 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.807939 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.810487 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.810731 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-config\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.810935 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw6c6\" (UniqueName: \"kubernetes.io/projected/43e3f941-da86-4f2a-80ea-24d29e55acb3-kube-api-access-rw6c6\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.811054 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.811083 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-scripts\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.814790 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.814913 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-scripts\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.816915 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43e3f941-da86-4f2a-80ea-24d29e55acb3-config\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.820896 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.821600 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.822138 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.822370 4689 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3a33baed-7a5b-44f9-b344-114919fa316b-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.822387 4689 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.822398 4689 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.827451 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m" (OuterVolumeSpecName: "kube-api-access-j9w2m") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "kube-api-access-j9w2m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.827904 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.831438 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.832991 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e3f941-da86-4f2a-80ea-24d29e55acb3-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.839101 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw6c6\" (UniqueName: \"kubernetes.io/projected/43e3f941-da86-4f2a-80ea-24d29e55acb3-kube-api-access-rw6c6\") pod \"ovn-northd-0\" (UID: \"43e3f941-da86-4f2a-80ea-24d29e55acb3\") " pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.846428 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.853387 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.857575 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts" (OuterVolumeSpecName: "scripts") pod "3a33baed-7a5b-44f9-b344-114919fa316b" (UID: "3a33baed-7a5b-44f9-b344-114919fa316b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.924108 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9w2m\" (UniqueName: \"kubernetes.io/projected/3a33baed-7a5b-44f9-b344-114919fa316b-kube-api-access-j9w2m\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.924141 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a33baed-7a5b-44f9-b344-114919fa316b-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.924183 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:18.924191 4689 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3a33baed-7a5b-44f9-b344-114919fa316b-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.031355 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.157595 4689 generic.go:334] "Generic (PLEG): container finished" podID="02b22380-5105-4302-a086-e75dd58e63e5" containerID="efe38824785111121aa433fb517c756f0a9a150e2cdcef8d1d38970f3f22e666" exitCode=0 Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.157773 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" event={"ID":"02b22380-5105-4302-a086-e75dd58e63e5","Type":"ContainerDied","Data":"efe38824785111121aa433fb517c756f0a9a150e2cdcef8d1d38970f3f22e666"} Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.163964 4689 generic.go:334] "Generic (PLEG): container finished" podID="7167d100-f446-4745-b762-16d355cf4b9c" containerID="3481f853370372acd70a380420bb978c71911e6cd833279f2a67bb92a5b07161" exitCode=0 Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.164041 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" event={"ID":"7167d100-f446-4745-b762-16d355cf4b9c","Type":"ContainerDied","Data":"3481f853370372acd70a380420bb978c71911e6cd833279f2a67bb92a5b07161"} Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.167216 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-sb9zz" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.169287 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sb9zz" event={"ID":"3a33baed-7a5b-44f9-b344-114919fa316b","Type":"ContainerDied","Data":"f8359242de17f7452b8419360cfa43bb6bab61f2b81838e1519d3140fb026557"} Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.169360 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8359242de17f7452b8419360cfa43bb6bab61f2b81838e1519d3140fb026557" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.230725 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.243197 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/502e87fb-9e46-41c3-929e-c007018641db-etc-swift\") pod \"swift-storage-0\" (UID: \"502e87fb-9e46-41c3-929e-c007018641db\") " pod="openstack/swift-storage-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.293904 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.657087 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-5tlkt"] Jan 23 11:11:19 crc kubenswrapper[4689]: E0123 11:11:19.657923 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a33baed-7a5b-44f9-b344-114919fa316b" containerName="swift-ring-rebalance" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.657951 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a33baed-7a5b-44f9-b344-114919fa316b" containerName="swift-ring-rebalance" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.658217 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a33baed-7a5b-44f9-b344-114919fa316b" containerName="swift-ring-rebalance" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.662778 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.665480 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pcxx6" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.667057 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.708501 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5tlkt"] Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.730698 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf-config-vq8mx"] Jan 23 11:11:19 crc kubenswrapper[4689]: W0123 11:11:19.737249 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43e3f941_da86_4f2a_80ea_24d29e55acb3.slice/crio-8ad427b136c2c306994c2dcec0cece9de7a4a9625a3d3fc659675d9411035284 WatchSource:0}: Error finding container 8ad427b136c2c306994c2dcec0cece9de7a4a9625a3d3fc659675d9411035284: Status 404 returned error can't find the container with id 8ad427b136c2c306994c2dcec0cece9de7a4a9625a3d3fc659675d9411035284 Jan 23 11:11:19 crc kubenswrapper[4689]: W0123 11:11:19.738461 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadbb0d16_b112_411c_baf4_28ff742fb68d.slice/crio-d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061 WatchSource:0}: Error finding container d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061: Status 404 returned error can't find the container with id d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061 Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.742485 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.742523 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.742577 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2tn\" (UniqueName: \"kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.742627 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.749746 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 11:11:19 
crc kubenswrapper[4689]: I0123 11:11:19.844591 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.845255 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.845288 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.845336 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2tn\" (UniqueName: \"kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.852740 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.853766 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.854451 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:19 crc kubenswrapper[4689]: I0123 11:11:19.860989 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc2tn\" (UniqueName: \"kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn\") pod \"glance-db-sync-5tlkt\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.015654 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5tlkt" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.036126 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 23 11:11:20 crc kubenswrapper[4689]: W0123 11:11:20.070403 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502e87fb_9e46_41c3_929e_c007018641db.slice/crio-53f00fe8167de6274e2246f19206f98467b2662c2112badaf3faf6810bd27815 WatchSource:0}: Error finding container 53f00fe8167de6274e2246f19206f98467b2662c2112badaf3faf6810bd27815: Status 404 returned error can't find the container with id 53f00fe8167de6274e2246f19206f98467b2662c2112badaf3faf6810bd27815 Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.200925 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-vq8mx" event={"ID":"adbb0d16-b112-411c-baf4-28ff742fb68d","Type":"ContainerStarted","Data":"d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061"} Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.207158 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"53f00fe8167de6274e2246f19206f98467b2662c2112badaf3faf6810bd27815"} Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.217021 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"43e3f941-da86-4f2a-80ea-24d29e55acb3","Type":"ContainerStarted","Data":"8ad427b136c2c306994c2dcec0cece9de7a4a9625a3d3fc659675d9411035284"} Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.308665 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output=< Jan 23 11:11:20 crc kubenswrapper[4689]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 23 11:11:20 crc kubenswrapper[4689]: > Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.818726 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.830441 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.878890 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w9xp\" (UniqueName: \"kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp\") pod \"7167d100-f446-4745-b762-16d355cf4b9c\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.878997 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts\") pod \"7167d100-f446-4745-b762-16d355cf4b9c\" (UID: \"7167d100-f446-4745-b762-16d355cf4b9c\") " Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.880493 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7167d100-f446-4745-b762-16d355cf4b9c" (UID: "7167d100-f446-4745-b762-16d355cf4b9c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.885208 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5tlkt"] Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.889601 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp" (OuterVolumeSpecName: "kube-api-access-4w9xp") pod "7167d100-f446-4745-b762-16d355cf4b9c" (UID: "7167d100-f446-4745-b762-16d355cf4b9c"). InnerVolumeSpecName "kube-api-access-4w9xp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.918935 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.127:5671: connect: connection refused" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.978075 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.980308 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts\") pod \"02b22380-5105-4302-a086-e75dd58e63e5\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.981088 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vhr2\" (UniqueName: \"kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2\") pod \"02b22380-5105-4302-a086-e75dd58e63e5\" (UID: \"02b22380-5105-4302-a086-e75dd58e63e5\") " Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.980913 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "02b22380-5105-4302-a086-e75dd58e63e5" (UID: "02b22380-5105-4302-a086-e75dd58e63e5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.982222 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w9xp\" (UniqueName: \"kubernetes.io/projected/7167d100-f446-4745-b762-16d355cf4b9c-kube-api-access-4w9xp\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.982245 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02b22380-5105-4302-a086-e75dd58e63e5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.982254 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7167d100-f446-4745-b762-16d355cf4b9c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:20 crc kubenswrapper[4689]: I0123 11:11:20.984214 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2" (OuterVolumeSpecName: "kube-api-access-6vhr2") pod "02b22380-5105-4302-a086-e75dd58e63e5" (UID: "02b22380-5105-4302-a086-e75dd58e63e5"). InnerVolumeSpecName "kube-api-access-6vhr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.084745 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vhr2\" (UniqueName: \"kubernetes.io/projected/02b22380-5105-4302-a086-e75dd58e63e5-kube-api-access-6vhr2\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.228921 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" event={"ID":"02b22380-5105-4302-a086-e75dd58e63e5","Type":"ContainerDied","Data":"2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b"} Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.228962 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c8823883d2cbaa07b920a9bce48db012e9235f1708d98c6a0f7488a83c8d22b" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.228966 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-2f06-account-create-update-7hlnh" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.232133 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" event={"ID":"7167d100-f446-4745-b762-16d355cf4b9c","Type":"ContainerDied","Data":"7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324"} Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.232168 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fe4304394bd77a7677fa09fdb943cfed1d5192861607fc0be13b32e35f7a324" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.232198 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-nxlln" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.239799 4689 generic.go:334] "Generic (PLEG): container finished" podID="adbb0d16-b112-411c-baf4-28ff742fb68d" containerID="477a61405d5cddaa22f592d37dc0b1a76176ff22521ab00ad5520c4c7c2d41ea" exitCode=0 Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.239889 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-vq8mx" event={"ID":"adbb0d16-b112-411c-baf4-28ff742fb68d","Type":"ContainerDied","Data":"477a61405d5cddaa22f592d37dc0b1a76176ff22521ab00ad5520c4c7c2d41ea"} Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.247730 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5tlkt" event={"ID":"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3","Type":"ContainerStarted","Data":"4db5b1c2f033f9a25b1a9eb343e730ba9430834bc567bda3abb934a9820677a6"} Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.259808 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Jan 23 11:11:21 crc kubenswrapper[4689]: I0123 11:11:21.352317 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.257406 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"43e3f941-da86-4f2a-80ea-24d29e55acb3","Type":"ContainerStarted","Data":"0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4"} Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.259500 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"f8f44b12ca0f1de4d53f46acfa18f66d1e72805503be9d6e7cf4c6798c633efd"} Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.538468 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.538841 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.539268 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-vj2d8"] Jan 23 11:11:22 crc kubenswrapper[4689]: E0123 11:11:22.539959 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b22380-5105-4302-a086-e75dd58e63e5" containerName="mariadb-account-create-update" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.539975 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="02b22380-5105-4302-a086-e75dd58e63e5" containerName="mariadb-account-create-update" Jan 23 11:11:22 crc kubenswrapper[4689]: E0123 11:11:22.539994 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7167d100-f446-4745-b762-16d355cf4b9c" containerName="mariadb-database-create" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.540001 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7167d100-f446-4745-b762-16d355cf4b9c" containerName="mariadb-database-create" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.540242 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="02b22380-5105-4302-a086-e75dd58e63e5" containerName="mariadb-account-create-update" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.540261 
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.540970 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.542074 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.543468 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.548194 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-vj2d8"]
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.625856 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.626197 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w6kv\" (UniqueName: \"kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.640133 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-vq8mx"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.728469 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.728550 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run" (OuterVolumeSpecName: "var-run") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.728637 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.729485 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.729539 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.729606 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.729650 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.730507 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts" (OuterVolumeSpecName: "scripts") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.730600 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.730663 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.730792 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbnhd\" (UniqueName: \"kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd\") pod \"adbb0d16-b112-411c-baf4-28ff742fb68d\" (UID: \"adbb0d16-b112-411c-baf4-28ff742fb68d\") "
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.731342 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w6kv\" (UniqueName: \"kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.732519 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.734390 4689 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run\") on node \"crc\" DevicePath \"\""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.734703 4689 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.734786 4689 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.734864 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adbb0d16-b112-411c-baf4-28ff742fb68d-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.734921 4689 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/adbb0d16-b112-411c-baf4-28ff742fb68d-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.733901 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8"
Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.733890 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd" (OuterVolumeSpecName: "kube-api-access-qbnhd") pod "adbb0d16-b112-411c-baf4-28ff742fb68d" (UID: "adbb0d16-b112-411c-baf4-28ff742fb68d"). InnerVolumeSpecName "kube-api-access-qbnhd". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.751899 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w6kv\" (UniqueName: \"kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv\") pod \"root-account-create-update-vj2d8\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " pod="openstack/root-account-create-update-vj2d8" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.836981 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbnhd\" (UniqueName: \"kubernetes.io/projected/adbb0d16-b112-411c-baf4-28ff742fb68d-kube-api-access-qbnhd\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.864482 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-vj2d8" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.901750 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"] Jan 23 11:11:22 crc kubenswrapper[4689]: E0123 11:11:22.902391 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adbb0d16-b112-411c-baf4-28ff742fb68d" containerName="ovn-config" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.902415 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="adbb0d16-b112-411c-baf4-28ff742fb68d" containerName="ovn-config" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.902856 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="adbb0d16-b112-411c-baf4-28ff742fb68d" containerName="ovn-config" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.905003 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:22 crc kubenswrapper[4689]: I0123 11:11:22.917420 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"] Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.040244 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.040827 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.040915 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmnxn\" (UniqueName: \"kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.142347 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.142426 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmnxn\" (UniqueName: \"kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.142465 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.142889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.142915 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.164248 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rmnxn\" (UniqueName: \"kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn\") pod \"redhat-marketplace-s4f4r\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.221365 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.273518 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-vq8mx" event={"ID":"adbb0d16-b112-411c-baf4-28ff742fb68d","Type":"ContainerDied","Data":"d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061"} Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.273560 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9079a3ffbb0b3ce3c52e563aeb23a50016c9ea130832dda9dab94a2e88d7061" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.273923 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-vq8mx" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.281080 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"0ddf4a0e5e90cfd016609ca1ee2d5f168321f1ca95582c18b16cb26a6c67f1e8"} Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.281447 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"25b3d2592fdcfa8f63066c222ac61aed9044efd448d86373fd24cc75910ac3d0"} Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.281464 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"675aaf532922f27352eab8aef24cf155e30cd26849598e889184dd1f7b9664b7"} Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.284310 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"43e3f941-da86-4f2a-80ea-24d29e55acb3","Type":"ContainerStarted","Data":"dbcbc56ee4b534916555e89d2ade0c7cc0eee6167b0b7e21e5b6221cb2dd43eb"} Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.284580 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.285847 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.317248 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.225611201 podStartE2EDuration="5.317227756s" podCreationTimestamp="2026-01-23 11:11:18 +0000 UTC" firstStartedPulling="2026-01-23 11:11:19.740836054 +0000 UTC m=+1344.365515913" lastFinishedPulling="2026-01-23 11:11:21.832452609 +0000 UTC m=+1346.457132468" observedRunningTime="2026-01-23 11:11:23.309861163 +0000 UTC m=+1347.934541022" watchObservedRunningTime="2026-01-23 11:11:23.317227756 +0000 UTC m=+1347.941907615" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.440338 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-vj2d8"] Jan 23 11:11:23 crc 
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.737190 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s4nwf-config-vq8mx"]
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.753938 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s4nwf-config-vq8mx"]
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.799599 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"]
Jan 23 11:11:23 crc kubenswrapper[4689]: W0123 11:11:23.800404 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod287c61f1_ab32_444f_a868_7f522f2a4916.slice/crio-b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5 WatchSource:0}: Error finding container b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5: Status 404 returned error can't find the container with id b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.832479 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s4nwf-config-pz2gn"]
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.833905 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.836251 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.844186 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf-config-pz2gn"]
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.860400 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.860641 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.861046 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.861210 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.861408 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.861515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqssk\" (UniqueName: \"kubernetes.io/projected/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-kube-api-access-cqssk\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.962924 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.962986 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqssk\" (UniqueName: \"kubernetes.io/projected/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-kube-api-access-cqssk\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963072 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963110 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963215 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963239 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963362 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963362 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.963938 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.966610 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn"
\"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn" Jan 23 11:11:23 crc kubenswrapper[4689]: I0123 11:11:23.982132 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqssk\" (UniqueName: \"kubernetes.io/projected/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-kube-api-access-cqssk\") pod \"ovn-controller-s4nwf-config-pz2gn\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " pod="openstack/ovn-controller-s4nwf-config-pz2gn" Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.166929 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-pz2gn" Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.295497 4689 generic.go:334] "Generic (PLEG): container finished" podID="287c61f1-ab32-444f-a868-7f522f2a4916" containerID="5203fdef91227f2ea52817270e91a2758ad79b91487523ccdae108c74a347ffe" exitCode=0 Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.295544 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerDied","Data":"5203fdef91227f2ea52817270e91a2758ad79b91487523ccdae108c74a347ffe"} Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.295579 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerStarted","Data":"b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5"} Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.301022 4689 generic.go:334] "Generic (PLEG): container finished" podID="c52a6d4e-b28d-49de-ba97-65d3e33e9d56" containerID="a186d3e2c17ebd6163efc399489f67b9a8a9288c88753bf502bc3ac17fe4533d" exitCode=0 Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.301070 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-vj2d8" event={"ID":"c52a6d4e-b28d-49de-ba97-65d3e33e9d56","Type":"ContainerDied","Data":"a186d3e2c17ebd6163efc399489f67b9a8a9288c88753bf502bc3ac17fe4533d"} Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.301118 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-vj2d8" event={"ID":"c52a6d4e-b28d-49de-ba97-65d3e33e9d56","Type":"ContainerStarted","Data":"eb042e0ae85f1479c8cf26fac146f307a98fe7f2aee6470fc138f08ed8a4effe"} Jan 23 11:11:24 crc kubenswrapper[4689]: I0123 11:11:24.936731 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s4nwf-config-pz2gn"] Jan 23 11:11:24 crc kubenswrapper[4689]: W0123 11:11:24.950532 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7968dfee_57b2_4801_b7a1_661fe4bd7c2b.slice/crio-9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d WatchSource:0}: Error finding container 9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d: Status 404 returned error can't find the container with id 9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.323388 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-pz2gn" 
event={"ID":"7968dfee-57b2-4801-b7a1-661fe4bd7c2b","Type":"ContainerStarted","Data":"9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d"} Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.328898 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"e7cb78a07adc5f1be59816689c07904ebd085b3ae2420aad057782d9cf1b6a44"} Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.328930 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"ee1930b357412b4bbd20893dacd3f4ccb3976ea4379a31485c60576a34233305"} Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.337778 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-s4nwf" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.654818 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adbb0d16-b112-411c-baf4-28ff742fb68d" path="/var/lib/kubelet/pods/adbb0d16-b112-411c-baf4-28ff742fb68d/volumes" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.687796 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-vj2d8" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.709660 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts\") pod \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.709801 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w6kv\" (UniqueName: \"kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv\") pod \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\" (UID: \"c52a6d4e-b28d-49de-ba97-65d3e33e9d56\") " Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.710888 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c52a6d4e-b28d-49de-ba97-65d3e33e9d56" (UID: "c52a6d4e-b28d-49de-ba97-65d3e33e9d56"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.716497 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv" (OuterVolumeSpecName: "kube-api-access-8w6kv") pod "c52a6d4e-b28d-49de-ba97-65d3e33e9d56" (UID: "c52a6d4e-b28d-49de-ba97-65d3e33e9d56"). InnerVolumeSpecName "kube-api-access-8w6kv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.812909 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:25 crc kubenswrapper[4689]: I0123 11:11:25.812944 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w6kv\" (UniqueName: \"kubernetes.io/projected/c52a6d4e-b28d-49de-ba97-65d3e33e9d56-kube-api-access-8w6kv\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.245639 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.246079 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="prometheus" containerID="cri-o://1310290e165d150996358a466360c57a9de403355d235b889f236bd0ac425459" gracePeriod=600 Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.246730 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="config-reloader" containerID="cri-o://70db53e6882bea47c8e8cf0319e00077420a0150cde8fb9fb2c5406c1c9d487b" gracePeriod=600 Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.246819 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="thanos-sidecar" containerID="cri-o://9771dbc496627ce3796b50ee2d64ab65bf1bbab3a1b1639171333ea70927098d" gracePeriod=600 Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.363014 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerStarted","Data":"19f914e5c2f2934f55e25abc740a4c3371b6bd86f7e4fc9a8a3d1fd9e1246f62"} Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.366930 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-vj2d8" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.366931 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-vj2d8" event={"ID":"c52a6d4e-b28d-49de-ba97-65d3e33e9d56","Type":"ContainerDied","Data":"eb042e0ae85f1479c8cf26fac146f307a98fe7f2aee6470fc138f08ed8a4effe"} Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.367192 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb042e0ae85f1479c8cf26fac146f307a98fe7f2aee6470fc138f08ed8a4effe" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.422542 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"ca3f47407c517ead63bb320dafe80be4c3d1d872d1eb7bc3e8a2c71c869e7ab7"} Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.429955 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-pz2gn" event={"ID":"7968dfee-57b2-4801-b7a1-661fe4bd7c2b","Type":"ContainerStarted","Data":"4a9a449081748ed764ee2d33be3a3fc6da7857eac3c98ea4c36f4f6e79b58d07"} Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.526315 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s4nwf-config-pz2gn" podStartSLOduration=3.526289671 podStartE2EDuration="3.526289671s" podCreationTimestamp="2026-01-23 11:11:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:26.479684181 +0000 UTC m=+1351.104364040" watchObservedRunningTime="2026-01-23 11:11:26.526289671 +0000 UTC m=+1351.150969530" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.541214 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-lh96s"] Jan 23 11:11:26 crc kubenswrapper[4689]: E0123 11:11:26.541958 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52a6d4e-b28d-49de-ba97-65d3e33e9d56" containerName="mariadb-account-create-update" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.542236 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52a6d4e-b28d-49de-ba97-65d3e33e9d56" containerName="mariadb-account-create-update" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.542711 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52a6d4e-b28d-49de-ba97-65d3e33e9d56" containerName="mariadb-account-create-update" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.544092 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.555279 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-lh96s"] Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.644386 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.644511 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blgb8\" (UniqueName: \"kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.749525 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.749646 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blgb8\" (UniqueName: \"kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.750957 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.775473 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blgb8\" (UniqueName: \"kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8\") pod \"mysqld-exporter-openstack-cell1-db-create-lh96s\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.823114 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-8e94-account-create-update-5f7ck"] Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.825047 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.832707 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.841755 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-8e94-account-create-update-5f7ck"] Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.917275 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.954795 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:26 crc kubenswrapper[4689]: I0123 11:11:26.954859 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgrcm\" (UniqueName: \"kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.057351 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.057400 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgrcm\" (UniqueName: \"kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.058362 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.075977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgrcm\" (UniqueName: \"kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm\") pod \"mysqld-exporter-8e94-account-create-update-5f7ck\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.160732 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.375690 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-lh96s"] Jan 23 11:11:27 crc kubenswrapper[4689]: W0123 11:11:27.386897 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3602ba50_13ae_492b_855d_9b7f51c6d398.slice/crio-a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8 WatchSource:0}: Error finding container a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8: Status 404 returned error can't find the container with id a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442277 4689 generic.go:334] "Generic (PLEG): container finished" podID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerID="9771dbc496627ce3796b50ee2d64ab65bf1bbab3a1b1639171333ea70927098d" exitCode=0 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442308 4689 generic.go:334] "Generic (PLEG): container finished" podID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerID="70db53e6882bea47c8e8cf0319e00077420a0150cde8fb9fb2c5406c1c9d487b" exitCode=0 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442315 4689 generic.go:334] "Generic (PLEG): container finished" podID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerID="1310290e165d150996358a466360c57a9de403355d235b889f236bd0ac425459" exitCode=0 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442353 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerDied","Data":"9771dbc496627ce3796b50ee2d64ab65bf1bbab3a1b1639171333ea70927098d"} Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442377 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerDied","Data":"70db53e6882bea47c8e8cf0319e00077420a0150cde8fb9fb2c5406c1c9d487b"} Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.442386 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerDied","Data":"1310290e165d150996358a466360c57a9de403355d235b889f236bd0ac425459"} Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.443382 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" event={"ID":"3602ba50-13ae-492b-855d-9b7f51c6d398","Type":"ContainerStarted","Data":"a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8"} Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.445951 4689 generic.go:334] "Generic (PLEG): container finished" podID="287c61f1-ab32-444f-a868-7f522f2a4916" containerID="19f914e5c2f2934f55e25abc740a4c3371b6bd86f7e4fc9a8a3d1fd9e1246f62" exitCode=0 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.446064 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerDied","Data":"19f914e5c2f2934f55e25abc740a4c3371b6bd86f7e4fc9a8a3d1fd9e1246f62"} Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.539069 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/prometheus-metric-storage-0" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.137:9090/-/ready\": dial tcp 10.217.0.137:9090: connect: connection refused" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.631746 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-8e94-account-create-update-5f7ck"] Jan 23 11:11:27 crc kubenswrapper[4689]: W0123 11:11:27.664738 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode72bd573_8bb9_43e3_8bce_26701d118894.slice/crio-71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389 WatchSource:0}: Error finding container 71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389: Status 404 returned error can't find the container with id 71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389 Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.858077 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988043 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988418 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988463 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988576 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988610 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988632 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988675 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988700 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988744 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988800 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s98xq\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq\") pod \"963b9b68-de0c-48ca-8a48-4641f6eb3688\" (UID: \"963b9b68-de0c-48ca-8a48-4641f6eb3688\") " Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.988952 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.989283 4689 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.989413 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.991594 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:27 crc kubenswrapper[4689]: I0123 11:11:27.995639 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out" (OuterVolumeSpecName: "config-out") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "config-out". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.001207 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config" (OuterVolumeSpecName: "config") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.001420 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.001510 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq" (OuterVolumeSpecName: "kube-api-access-s98xq") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "kube-api-access-s98xq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.004841 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.022995 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.050078 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config" (OuterVolumeSpecName: "web-config") pod "963b9b68-de0c-48ca-8a48-4641f6eb3688" (UID: "963b9b68-de0c-48ca-8a48-4641f6eb3688"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091780 4689 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091820 4689 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/963b9b68-de0c-48ca-8a48-4641f6eb3688-config-out\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091872 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") on node \"crc\" " Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091891 4689 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091904 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091915 4689 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-web-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091925 4689 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/963b9b68-de0c-48ca-8a48-4641f6eb3688-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091938 4689 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/963b9b68-de0c-48ca-8a48-4641f6eb3688-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.091987 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s98xq\" (UniqueName: \"kubernetes.io/projected/963b9b68-de0c-48ca-8a48-4641f6eb3688-kube-api-access-s98xq\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.117444 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.117624 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5") on node "crc" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.193463 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.458255 4689 generic.go:334] "Generic (PLEG): container finished" podID="e72bd573-8bb9-43e3-8bce-26701d118894" containerID="2c3887635fcf01b1eb4460e1770da411159c3add513b47bd4a3222c0fa782da9" exitCode=0 Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.458346 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" event={"ID":"e72bd573-8bb9-43e3-8bce-26701d118894","Type":"ContainerDied","Data":"2c3887635fcf01b1eb4460e1770da411159c3add513b47bd4a3222c0fa782da9"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.458741 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" event={"ID":"e72bd573-8bb9-43e3-8bce-26701d118894","Type":"ContainerStarted","Data":"71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.467193 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"963b9b68-de0c-48ca-8a48-4641f6eb3688","Type":"ContainerDied","Data":"c7764eee40e8cd9678326b78683d0231b5d9b231b9d9c3a2713e7761a4561571"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.467254 4689 scope.go:117] "RemoveContainer" containerID="9771dbc496627ce3796b50ee2d64ab65bf1bbab3a1b1639171333ea70927098d" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.467472 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.482046 4689 generic.go:334] "Generic (PLEG): container finished" podID="7968dfee-57b2-4801-b7a1-661fe4bd7c2b" containerID="4a9a449081748ed764ee2d33be3a3fc6da7857eac3c98ea4c36f4f6e79b58d07" exitCode=0 Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.482108 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-pz2gn" event={"ID":"7968dfee-57b2-4801-b7a1-661fe4bd7c2b","Type":"ContainerDied","Data":"4a9a449081748ed764ee2d33be3a3fc6da7857eac3c98ea4c36f4f6e79b58d07"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.485227 4689 generic.go:334] "Generic (PLEG): container finished" podID="3602ba50-13ae-492b-855d-9b7f51c6d398" containerID="3d49a105448f011c8629836c87cae15ed2c23eb6deb83b05e8fcfbe31650f4b8" exitCode=0 Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.485295 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" event={"ID":"3602ba50-13ae-492b-855d-9b7f51c6d398","Type":"ContainerDied","Data":"3d49a105448f011c8629836c87cae15ed2c23eb6deb83b05e8fcfbe31650f4b8"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.488022 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerStarted","Data":"bfb1a1bc7ca07e3d7df7950723940e4b100dbaa2dc5fdb7419862d4cc47d96f8"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.502219 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"7086214ae4aaed6bd8bcc28cfb4ae29169294156558a2037208bcc4a7088b40a"} Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.524325 4689 scope.go:117] "RemoveContainer" containerID="70db53e6882bea47c8e8cf0319e00077420a0150cde8fb9fb2c5406c1c9d487b" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.528826 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s4f4r" podStartSLOduration=3.052546756 podStartE2EDuration="6.528804488s" podCreationTimestamp="2026-01-23 11:11:22 +0000 UTC" firstStartedPulling="2026-01-23 11:11:24.413048538 +0000 UTC m=+1349.037728397" lastFinishedPulling="2026-01-23 11:11:27.88930627 +0000 UTC m=+1352.513986129" observedRunningTime="2026-01-23 11:11:28.51842154 +0000 UTC m=+1353.143101389" watchObservedRunningTime="2026-01-23 11:11:28.528804488 +0000 UTC m=+1353.153484347" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.557262 4689 scope.go:117] "RemoveContainer" containerID="1310290e165d150996358a466360c57a9de403355d235b889f236bd0ac425459" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.567368 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.579963 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.604427 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:11:28 crc kubenswrapper[4689]: E0123 11:11:28.604932 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="init-config-reloader" Jan 23 
11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.604956 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="init-config-reloader" Jan 23 11:11:28 crc kubenswrapper[4689]: E0123 11:11:28.604973 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="config-reloader" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.604981 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="config-reloader" Jan 23 11:11:28 crc kubenswrapper[4689]: E0123 11:11:28.605003 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="thanos-sidecar" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.605012 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="thanos-sidecar" Jan 23 11:11:28 crc kubenswrapper[4689]: E0123 11:11:28.605033 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="prometheus" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.605040 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="prometheus" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.605311 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="config-reloader" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.605343 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="prometheus" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.605360 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" containerName="thanos-sidecar" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.609518 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.616006 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.616220 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.618327 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.618715 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.619019 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.619167 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-78tqt" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.619441 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.619642 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.619749 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.621868 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710674 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710786 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710821 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqz8v\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-kube-api-access-mqz8v\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710846 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710887 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710925 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710952 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.710974 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.711020 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.711071 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.711185 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.711209 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.711289 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812530 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812625 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812670 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812697 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqz8v\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-kube-api-access-mqz8v\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812720 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812745 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812769 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: 
\"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812787 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812803 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812841 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812881 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812921 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.812940 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.814414 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.814586 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.814983 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: 
\"kubernetes.io/configmap/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.817560 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.817610 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0e6ddec39173b96d0ed120a8bb31333c778d498bbf2e1919046af9890c5ee84b/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.819598 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.820533 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.823087 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.823712 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.826362 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.826520 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.831757 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mqz8v\" (UniqueName: \"kubernetes.io/projected/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-kube-api-access-mqz8v\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.836129 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.840927 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8cc0bc0c-47d7-48d8-bfba-a9694ab485a0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.868383 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e74b35ee-3dee-46ce-b8c1-147d381597f5\") pod \"prometheus-metric-storage-0\" (UID: \"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0\") " pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:28 crc kubenswrapper[4689]: I0123 11:11:28.943294 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 23 11:11:29 crc kubenswrapper[4689]: I0123 11:11:29.651462 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="963b9b68-de0c-48ca-8a48-4641f6eb3688" path="/var/lib/kubelet/pods/963b9b68-de0c-48ca-8a48-4641f6eb3688/volumes" Jan 23 11:11:29 crc kubenswrapper[4689]: I0123 11:11:29.756129 4689 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","pod7525f02f-3c52-4e75-bace-0d3e1bedeee8"] err="unable to destroy cgroup paths for cgroup [kubepods burstable pod7525f02f-3c52-4e75-bace-0d3e1bedeee8] : Timed out while waiting for systemd to remove kubepods-burstable-pod7525f02f_3c52_4e75_bace_0d3e1bedeee8.slice" Jan 23 11:11:29 crc kubenswrapper[4689]: E0123 11:11:29.756217 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods burstable pod7525f02f-3c52-4e75-bace-0d3e1bedeee8] : unable to destroy cgroup paths for cgroup [kubepods burstable pod7525f02f-3c52-4e75-bace-0d3e1bedeee8] : Timed out while waiting for systemd to remove kubepods-burstable-pod7525f02f_3c52_4e75_bace_0d3e1bedeee8.slice" pod="openshift-console/console-7dddff5c78-d9qgh" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" Jan 23 11:11:30 crc kubenswrapper[4689]: I0123 11:11:30.527919 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dddff5c78-d9qgh" Jan 23 11:11:30 crc kubenswrapper[4689]: I0123 11:11:30.560517 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:11:30 crc kubenswrapper[4689]: I0123 11:11:30.569548 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7dddff5c78-d9qgh"] Jan 23 11:11:30 crc kubenswrapper[4689]: I0123 11:11:30.920351 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.207719 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-r9t2h"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.208971 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.277075 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.277356 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzg66\" (UniqueName: \"kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.290622 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-r9t2h"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.319333 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-rh66n"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.321622 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.349704 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rh66n"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.379767 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n8kf\" (UniqueName: \"kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.379844 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.379916 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.380014 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzg66\" (UniqueName: \"kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.381905 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.410651 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzg66\" (UniqueName: \"kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66\") pod \"heat-db-create-r9t2h\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.464619 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-7e2d-account-create-update-t5thb"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.466477 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.476376 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.481583 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n8kf\" (UniqueName: \"kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.481644 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.481735 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.481799 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d584c\" (UniqueName: \"kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.482406 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.483759 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7e2d-account-create-update-t5thb"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.503925 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7n8kf\" (UniqueName: \"kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf\") pod \"cinder-db-create-rh66n\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.541474 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-4gf6b"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.544068 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.557901 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4gf6b"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.572024 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e9e9-account-create-update-5zvgd"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.580709 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.585294 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.585386 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d584c\" (UniqueName: \"kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.606061 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.606068 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.616629 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.621162 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d584c\" (UniqueName: \"kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c\") pod \"cinder-7e2d-account-create-update-t5thb\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.647117 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.676341 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7525f02f-3c52-4e75-bace-0d3e1bedeee8" path="/var/lib/kubelet/pods/7525f02f-3c52-4e75-bace-0d3e1bedeee8/volumes" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.677024 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e9e9-account-create-update-5zvgd"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.691170 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-68w28"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.692621 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.696181 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jvx7x" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.696342 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.698579 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.698748 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.703510 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-68w28"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.713189 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.713501 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8npc5\" (UniqueName: \"kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.713569 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8khtv\" (UniqueName: \"kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.713697 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.716731 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-fd90-account-create-update-mb96s"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.721901 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.724209 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.745634 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fd90-account-create-update-mb96s"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.790884 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.815861 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.815928 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8khtv\" (UniqueName: \"kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816016 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816057 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816084 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816191 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7q9k\" (UniqueName: \"kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816223 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8s82\" (UniqueName: \"kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816254 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.816321 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8npc5\" (UniqueName: 
\"kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.818088 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.818758 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.840851 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8khtv\" (UniqueName: \"kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv\") pod \"barbican-db-create-4gf6b\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.841301 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8npc5\" (UniqueName: \"kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5\") pod \"barbican-e9e9-account-create-update-5zvgd\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.885550 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.924657 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.924722 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.924803 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7q9k\" (UniqueName: \"kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.924832 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8s82\" (UniqueName: \"kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.924903 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.925822 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-fklps"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.926973 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.927495 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fklps" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.929942 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.930607 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.938738 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8a74-account-create-update-fzj69"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.940485 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.942485 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.947270 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fklps"] Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.950851 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8s82\" (UniqueName: \"kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82\") pod \"heat-fd90-account-create-update-mb96s\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.964218 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7q9k\" (UniqueName: \"kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k\") pod \"keystone-db-sync-68w28\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:31 crc kubenswrapper[4689]: I0123 11:11:31.978817 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.015885 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8a74-account-create-update-fzj69"] Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.018975 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.026011 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7dbf\" (UniqueName: \"kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.026055 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.026115 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.026177 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv9dq\" (UniqueName: \"kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.041806 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.128405 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7dbf\" (UniqueName: \"kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.128476 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.128565 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.128612 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv9dq\" (UniqueName: \"kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.129744 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.130257 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.144750 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7dbf\" (UniqueName: \"kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf\") pod \"neutron-db-create-fklps\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.148512 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv9dq\" (UniqueName: \"kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq\") pod \"neutron-8a74-account-create-update-fzj69\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.282304 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fklps" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.289097 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.407236 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.410785 4689 scope.go:117] "RemoveContainer" containerID="acf99f6d01714c870d76ca8b6d3727de27773a6b7a39091d8fa7610262a22db9" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.435419 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blgb8\" (UniqueName: \"kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8\") pod \"3602ba50-13ae-492b-855d-9b7f51c6d398\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.435698 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts\") pod \"3602ba50-13ae-492b-855d-9b7f51c6d398\" (UID: \"3602ba50-13ae-492b-855d-9b7f51c6d398\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.437097 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3602ba50-13ae-492b-855d-9b7f51c6d398" (UID: "3602ba50-13ae-492b-855d-9b7f51c6d398"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.440360 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8" (OuterVolumeSpecName: "kube-api-access-blgb8") pod "3602ba50-13ae-492b-855d-9b7f51c6d398" (UID: "3602ba50-13ae-492b-855d-9b7f51c6d398"). InnerVolumeSpecName "kube-api-access-blgb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.539781 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blgb8\" (UniqueName: \"kubernetes.io/projected/3602ba50-13ae-492b-855d-9b7f51c6d398-kube-api-access-blgb8\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.539818 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3602ba50-13ae-492b-855d-9b7f51c6d398-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.560104 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-pz2gn" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.570748 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.570939 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" event={"ID":"e72bd573-8bb9-43e3-8bce-26701d118894","Type":"ContainerDied","Data":"71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389"} Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.570967 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71d54d7538fd6b15a38898fc2bf9f6779fa0961a615aac899e4714a5ce937389" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.629757 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s4nwf-config-pz2gn" event={"ID":"7968dfee-57b2-4801-b7a1-661fe4bd7c2b","Type":"ContainerDied","Data":"9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d"} Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.629799 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9423219986e236ef0ed1eacbd914a737cb4fd99b182a110f2f0b4745dca3976d" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.629857 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s4nwf-config-pz2gn" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640212 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640281 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640375 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqssk\" (UniqueName: \"kubernetes.io/projected/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-kube-api-access-cqssk\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640428 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640460 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640496 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts\") pod \"e72bd573-8bb9-43e3-8bce-26701d118894\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") " Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640532 4689 
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640532 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgrcm\" (UniqueName: \"kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm\") pod \"e72bd573-8bb9-43e3-8bce-26701d118894\" (UID: \"e72bd573-8bb9-43e3-8bce-26701d118894\") "
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.640614 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts\") pod \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\" (UID: \"7968dfee-57b2-4801-b7a1-661fe4bd7c2b\") "
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.642353 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "7968dfee-57b2-4801-b7a1-661fe4bd7c2b" (UID: "7968dfee-57b2-4801-b7a1-661fe4bd7c2b"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.643473 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run" (OuterVolumeSpecName: "var-run") pod "7968dfee-57b2-4801-b7a1-661fe4bd7c2b" (UID: "7968dfee-57b2-4801-b7a1-661fe4bd7c2b"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.643507 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "7968dfee-57b2-4801-b7a1-661fe4bd7c2b" (UID: "7968dfee-57b2-4801-b7a1-661fe4bd7c2b"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.644182 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "7968dfee-57b2-4801-b7a1-661fe4bd7c2b" (UID: "7968dfee-57b2-4801-b7a1-661fe4bd7c2b"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.644605 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e72bd573-8bb9-43e3-8bce-26701d118894" (UID: "e72bd573-8bb9-43e3-8bce-26701d118894"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.652342 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts" (OuterVolumeSpecName: "scripts") pod "7968dfee-57b2-4801-b7a1-661fe4bd7c2b" (UID: "7968dfee-57b2-4801-b7a1-661fe4bd7c2b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.660464 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" event={"ID":"3602ba50-13ae-492b-855d-9b7f51c6d398","Type":"ContainerDied","Data":"a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8"} Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.660507 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6c26b9317cd0bbff8479b8d7f51a3597dfdcff87d838b38965e2b8e02e6b7e8" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.660561 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-lh96s" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.667360 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm" (OuterVolumeSpecName: "kube-api-access-wgrcm") pod "e72bd573-8bb9-43e3-8bce-26701d118894" (UID: "e72bd573-8bb9-43e3-8bce-26701d118894"). InnerVolumeSpecName "kube-api-access-wgrcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.749923 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqssk\" (UniqueName: \"kubernetes.io/projected/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-kube-api-access-cqssk\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.749969 4689 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.749983 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.749995 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e72bd573-8bb9-43e3-8bce-26701d118894-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.750007 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgrcm\" (UniqueName: \"kubernetes.io/projected/e72bd573-8bb9-43e3-8bce-26701d118894-kube-api-access-wgrcm\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.750018 4689 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.750031 4689 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-run\") on node 
\"crc\" DevicePath \"\"" Jan 23 11:11:32 crc kubenswrapper[4689]: I0123 11:11:32.750042 4689 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7968dfee-57b2-4801-b7a1-661fe4bd7c2b-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:33 crc kubenswrapper[4689]: I0123 11:11:33.221975 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:33 crc kubenswrapper[4689]: I0123 11:11:33.222027 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:33 crc kubenswrapper[4689]: I0123 11:11:33.664845 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s4nwf-config-pz2gn"] Jan 23 11:11:33 crc kubenswrapper[4689]: I0123 11:11:33.673046 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s4nwf-config-pz2gn"] Jan 23 11:11:33 crc kubenswrapper[4689]: I0123 11:11:33.683887 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-8e94-account-create-update-5f7ck" Jan 23 11:11:34 crc kubenswrapper[4689]: I0123 11:11:34.094081 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 23 11:11:34 crc kubenswrapper[4689]: I0123 11:11:34.719322 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-s4f4r" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="registry-server" probeResult="failure" output=< Jan 23 11:11:34 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:11:34 crc kubenswrapper[4689]: > Jan 23 11:11:35 crc kubenswrapper[4689]: I0123 11:11:35.651492 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7968dfee-57b2-4801-b7a1-661fe4bd7c2b" path="/var/lib/kubelet/pods/7968dfee-57b2-4801-b7a1-661fe4bd7c2b/volumes" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.213218 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:11:37 crc kubenswrapper[4689]: E0123 11:11:37.214264 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7968dfee-57b2-4801-b7a1-661fe4bd7c2b" containerName="ovn-config" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214281 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7968dfee-57b2-4801-b7a1-661fe4bd7c2b" containerName="ovn-config" Jan 23 11:11:37 crc kubenswrapper[4689]: E0123 11:11:37.214347 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3602ba50-13ae-492b-855d-9b7f51c6d398" containerName="mariadb-database-create" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214355 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3602ba50-13ae-492b-855d-9b7f51c6d398" containerName="mariadb-database-create" Jan 23 11:11:37 crc kubenswrapper[4689]: E0123 11:11:37.214367 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72bd573-8bb9-43e3-8bce-26701d118894" containerName="mariadb-account-create-update" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214375 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72bd573-8bb9-43e3-8bce-26701d118894" containerName="mariadb-account-create-update" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214672 4689 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3602ba50-13ae-492b-855d-9b7f51c6d398" containerName="mariadb-database-create" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214694 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7968dfee-57b2-4801-b7a1-661fe4bd7c2b" containerName="ovn-config" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.214755 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e72bd573-8bb9-43e3-8bce-26701d118894" containerName="mariadb-account-create-update" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.216080 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.218208 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.229470 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.257505 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44tvv\" (UniqueName: \"kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.257570 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.257593 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.359042 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44tvv\" (UniqueName: \"kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.359106 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.359129 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.365397 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle\") pod 
\"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.366123 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.379815 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44tvv\" (UniqueName: \"kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv\") pod \"mysqld-exporter-0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " pod="openstack/mysqld-exporter-0" Jan 23 11:11:37 crc kubenswrapper[4689]: I0123 11:11:37.539031 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:11:42 crc kubenswrapper[4689]: E0123 11:11:42.174889 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Jan 23 11:11:42 crc kubenswrapper[4689]: E0123 11:11:42.175423 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hc2tn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-5tlkt_openstack(30a7d451-e70f-43cd-ae81-e5ccbdcb53f3): ErrImagePull: rpc error: code = Canceled desc = copying config: 
context canceled" logger="UnhandledError" Jan 23 11:11:42 crc kubenswrapper[4689]: E0123 11:11:42.176640 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-5tlkt" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" Jan 23 11:11:42 crc kubenswrapper[4689]: I0123 11:11:42.817938 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rh66n"] Jan 23 11:11:42 crc kubenswrapper[4689]: I0123 11:11:42.833321 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"bcdbb5135755c0d1c1235cb54ba76843d76d30ca821b87afa56baac72c8e2f37"} Jan 23 11:11:42 crc kubenswrapper[4689]: I0123 11:11:42.833674 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4gf6b"] Jan 23 11:11:42 crc kubenswrapper[4689]: W0123 11:11:42.854610 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98a0462a_471a_4ed9_90a6_0e185f70b2bb.slice/crio-e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5 WatchSource:0}: Error finding container e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5: Status 404 returned error can't find the container with id e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5 Jan 23 11:11:42 crc kubenswrapper[4689]: E0123 11:11:42.858108 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-5tlkt" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.030224 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8a74-account-create-update-fzj69"] Jan 23 11:11:43 crc kubenswrapper[4689]: W0123 11:11:43.039985 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18e705d3_b57f_46e9_9c58_4a889bb49638.slice/crio-8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943 WatchSource:0}: Error finding container 8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943: Status 404 returned error can't find the container with id 8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943 Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.043854 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fd90-account-create-update-mb96s"] Jan 23 11:11:43 crc kubenswrapper[4689]: W0123 11:11:43.052590 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a3222e1_02e3_44e3_ad01_ae2f033ceac3.slice/crio-eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849 WatchSource:0}: Error finding container eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849: Status 404 returned error can't find the container with id eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849 Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.054245 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7e2d-account-create-update-t5thb"] Jan 
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.312674 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s4f4r"
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.425210 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s4f4r"
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.478125 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-68w28"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.561833 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.601993 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-r9t2h"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.614026 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.622575 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e9e9-account-create-update-5zvgd"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.635917 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fklps"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.656774 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"]
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.848854 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"660730e89c136696e7f35425618591f4e3e9acbd9e5b1cf36434f3d24ff74aa1"}
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.850151 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fklps" event={"ID":"1744ca79-20e6-40d0-8d71-699e88c7013d","Type":"ContainerStarted","Data":"fc4e35b3821cf7f1f3e640a0a0c99309da116dfa0d05ef5fd4651a94abce00c8"}
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.851767 4689 generic.go:334] "Generic (PLEG): container finished" podID="3dcba22c-2c98-4d8a-91c6-ce572778f1cb" containerID="1e7896a7475e31eb878cbeb40db4aae43db0d6a1b87f69e291ec1cf33c206a5a" exitCode=0
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.851810 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a74-account-create-update-fzj69" event={"ID":"3dcba22c-2c98-4d8a-91c6-ce572778f1cb","Type":"ContainerDied","Data":"1e7896a7475e31eb878cbeb40db4aae43db0d6a1b87f69e291ec1cf33c206a5a"}
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.851827 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a74-account-create-update-fzj69" event={"ID":"3dcba22c-2c98-4d8a-91c6-ce572778f1cb","Type":"ContainerStarted","Data":"656baf0314bb77cb62d89333c499274d302dde977138a11aff048ad80702c98d"}
Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.858417 4689 generic.go:334] "Generic (PLEG): container finished" podID="2a3222e1-02e3-44e3-ad01-ae2f033ceac3" containerID="a7a4725a9303f3380d60dd5c15a72dd74df6b104839ec08a02211bc9813fe5ad" exitCode=0
event={"ID":"2a3222e1-02e3-44e3-ad01-ae2f033ceac3","Type":"ContainerDied","Data":"a7a4725a9303f3380d60dd5c15a72dd74df6b104839ec08a02211bc9813fe5ad"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.858517 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7e2d-account-create-update-t5thb" event={"ID":"2a3222e1-02e3-44e3-ad01-ae2f033ceac3","Type":"ContainerStarted","Data":"eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.860330 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-r9t2h" event={"ID":"cc3173d9-5b9e-47fc-a5aa-3a260297c997","Type":"ContainerStarted","Data":"247f2be37bd3b07f34b619163b769f14ba6e0775da60ac4fcd844cfc51621594"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.861982 4689 generic.go:334] "Generic (PLEG): container finished" podID="98a0462a-471a-4ed9-90a6-0e185f70b2bb" containerID="d33a448b5a3cd50e82d1d994a8b9ff6284ec3b7305b426f184baec61300dab1d" exitCode=0 Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.862093 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4gf6b" event={"ID":"98a0462a-471a-4ed9-90a6-0e185f70b2bb","Type":"ContainerDied","Data":"d33a448b5a3cd50e82d1d994a8b9ff6284ec3b7305b426f184baec61300dab1d"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.862200 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4gf6b" event={"ID":"98a0462a-471a-4ed9-90a6-0e185f70b2bb","Type":"ContainerStarted","Data":"e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.864622 4689 generic.go:334] "Generic (PLEG): container finished" podID="5ac9258d-ea9b-4018-bb50-2767a4aabfd2" containerID="b0e0a3dbfc36e0051e797e03a7d1af5a0b8c3d4b3fa9a5b6dfd3aad62fc577e5" exitCode=0 Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.864757 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rh66n" event={"ID":"5ac9258d-ea9b-4018-bb50-2767a4aabfd2","Type":"ContainerDied","Data":"b0e0a3dbfc36e0051e797e03a7d1af5a0b8c3d4b3fa9a5b6dfd3aad62fc577e5"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.864800 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rh66n" event={"ID":"5ac9258d-ea9b-4018-bb50-2767a4aabfd2","Type":"ContainerStarted","Data":"2be918683069d825582d63defd7f0ef263fb3c4b0e9c37da96c95a4788f2d3a7"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.866606 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-68w28" event={"ID":"eb7b2de7-60f5-4357-a6ec-21c925346c7f","Type":"ContainerStarted","Data":"ddae5ed764c8e85ff13fa05ea973c2c9cc99f65c2ea6ea186073e75941ca0992"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.867613 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e9e9-account-create-update-5zvgd" event={"ID":"db4a1b6f-833b-4ef8-a55b-707d4a135fdb","Type":"ContainerStarted","Data":"c0ee31bd094017f039bba656a852b0cf6877bce437a7f546ccc762f1bfc9359f"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.869083 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"d02f78f3-0cfc-48d4-8705-222943579cb0","Type":"ContainerStarted","Data":"34e22b57eaa02df55ed3c7aedc9671be13cbe4c286dcc58a015889cc02ded57f"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.873347 4689 generic.go:334] 
"Generic (PLEG): container finished" podID="18e705d3-b57f-46e9-9c58-4a889bb49638" containerID="2d1d5315ba6ae205ba4ee47ad522498e36b6fe3d8d4cdb35bc502bc993f4e683" exitCode=0 Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.873452 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fd90-account-create-update-mb96s" event={"ID":"18e705d3-b57f-46e9-9c58-4a889bb49638","Type":"ContainerDied","Data":"2d1d5315ba6ae205ba4ee47ad522498e36b6fe3d8d4cdb35bc502bc993f4e683"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.873494 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fd90-account-create-update-mb96s" event={"ID":"18e705d3-b57f-46e9-9c58-4a889bb49638","Type":"ContainerStarted","Data":"8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.880018 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"c31898a24594e298ed00a98a98aa110d89868d546140aa4b6b331560fe1275b7"} Jan 23 11:11:43 crc kubenswrapper[4689]: I0123 11:11:43.880061 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"6511277658e218ac74b7e00f24912d3177f6983010d99a66ffc2ee5b118ea796"} Jan 23 11:11:44 crc kubenswrapper[4689]: E0123 11:11:44.319262 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc3173d9_5b9e_47fc_a5aa_3a260297c997.slice/crio-6b089c8894446ce359c7f89f4395e5a23240a01be629445bef9d66d305ac36e5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc3173d9_5b9e_47fc_a5aa_3a260297c997.slice/crio-conmon-6b089c8894446ce359c7f89f4395e5a23240a01be629445bef9d66d305ac36e5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1744ca79_20e6_40d0_8d71_699e88c7013d.slice/crio-4313a23814c78f447f1b1cb9da4a0d48eb7f485c0ee51748ef4fa98de1a87454.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1744ca79_20e6_40d0_8d71_699e88c7013d.slice/crio-conmon-4313a23814c78f447f1b1cb9da4a0d48eb7f485c0ee51748ef4fa98de1a87454.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb4a1b6f_833b_4ef8_a55b_707d4a135fdb.slice/crio-8e918eb8aa6d378156b476e9476ed22b176b9ed2e62eee31b7b5f1c935fcbf53.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.899809 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"971ca56afacf2cf065f886bbd3c82dd5217e498302c81a8f44e9855bbeb764fc"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.900269 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"1a95ee4e525c77899f7af22b27bd92d59628c24b46b1aa78867bff2f36375a73"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.900289 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"96917259e987382fd3a0884e229b4210d2317497c10c12d2ddb3d4fcd5c59dce"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.902430 4689 generic.go:334] "Generic (PLEG): container finished" podID="1744ca79-20e6-40d0-8d71-699e88c7013d" containerID="4313a23814c78f447f1b1cb9da4a0d48eb7f485c0ee51748ef4fa98de1a87454" exitCode=0 Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.902468 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fklps" event={"ID":"1744ca79-20e6-40d0-8d71-699e88c7013d","Type":"ContainerDied","Data":"4313a23814c78f447f1b1cb9da4a0d48eb7f485c0ee51748ef4fa98de1a87454"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.921405 4689 generic.go:334] "Generic (PLEG): container finished" podID="cc3173d9-5b9e-47fc-a5aa-3a260297c997" containerID="6b089c8894446ce359c7f89f4395e5a23240a01be629445bef9d66d305ac36e5" exitCode=0 Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.921530 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-r9t2h" event={"ID":"cc3173d9-5b9e-47fc-a5aa-3a260297c997","Type":"ContainerDied","Data":"6b089c8894446ce359c7f89f4395e5a23240a01be629445bef9d66d305ac36e5"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.927750 4689 generic.go:334] "Generic (PLEG): container finished" podID="db4a1b6f-833b-4ef8-a55b-707d4a135fdb" containerID="8e918eb8aa6d378156b476e9476ed22b176b9ed2e62eee31b7b5f1c935fcbf53" exitCode=0 Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.927810 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e9e9-account-create-update-5zvgd" event={"ID":"db4a1b6f-833b-4ef8-a55b-707d4a135fdb","Type":"ContainerDied","Data":"8e918eb8aa6d378156b476e9476ed22b176b9ed2e62eee31b7b5f1c935fcbf53"} Jan 23 11:11:44 crc kubenswrapper[4689]: I0123 11:11:44.928400 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s4f4r" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="registry-server" containerID="cri-o://bfb1a1bc7ca07e3d7df7950723940e4b100dbaa2dc5fdb7419862d4cc47d96f8" gracePeriod=2 Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.696182 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.881118 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts\") pod \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.881392 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d584c\" (UniqueName: \"kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c\") pod \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\" (UID: \"2a3222e1-02e3-44e3-ad01-ae2f033ceac3\") " Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.882208 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2a3222e1-02e3-44e3-ad01-ae2f033ceac3" (UID: "2a3222e1-02e3-44e3-ad01-ae2f033ceac3"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.888744 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c" (OuterVolumeSpecName: "kube-api-access-d584c") pod "2a3222e1-02e3-44e3-ad01-ae2f033ceac3" (UID: "2a3222e1-02e3-44e3-ad01-ae2f033ceac3"). InnerVolumeSpecName "kube-api-access-d584c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.939227 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"d02f78f3-0cfc-48d4-8705-222943579cb0","Type":"ContainerStarted","Data":"2a4d3a83def808df40246fcc8f8d7ee00f26f048d72b1c4f0eeee13a76ae11b7"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.944916 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a74-account-create-update-fzj69" event={"ID":"3dcba22c-2c98-4d8a-91c6-ce572778f1cb","Type":"ContainerDied","Data":"656baf0314bb77cb62d89333c499274d302dde977138a11aff048ad80702c98d"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.944963 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="656baf0314bb77cb62d89333c499274d302dde977138a11aff048ad80702c98d" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.946854 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7e2d-account-create-update-t5thb" event={"ID":"2a3222e1-02e3-44e3-ad01-ae2f033ceac3","Type":"ContainerDied","Data":"eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.946900 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb8115827b8a01f488355afe438f66a8e61f1f196d6a74ca1a4c1f85db0e6849" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.946872 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7e2d-account-create-update-t5thb" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.948257 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fd90-account-create-update-mb96s" event={"ID":"18e705d3-b57f-46e9-9c58-4a889bb49638","Type":"ContainerDied","Data":"8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.948287 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8889ceebde4d025c35539cc30d9f46aed5190497d7448ce93d6ad4d2fd102943" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.958186 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.961855 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=7.351290959 podStartE2EDuration="8.961832921s" podCreationTimestamp="2026-01-23 11:11:37 +0000 UTC" firstStartedPulling="2026-01-23 11:11:43.481725043 +0000 UTC m=+1368.106404912" lastFinishedPulling="2026-01-23 11:11:45.092267015 +0000 UTC m=+1369.716946874" observedRunningTime="2026-01-23 11:11:45.955977618 +0000 UTC m=+1370.580657477" watchObservedRunningTime="2026-01-23 11:11:45.961832921 +0000 UTC m=+1370.586512790" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.965403 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"502e87fb-9e46-41c3-929e-c007018641db","Type":"ContainerStarted","Data":"f752e8fb61bc854c3767a5674226b12fd6e94164344072a31c875a17501b9c36"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.968882 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4gf6b" event={"ID":"98a0462a-471a-4ed9-90a6-0e185f70b2bb","Type":"ContainerDied","Data":"e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.968927 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e290cb8ebc80dc93320b6f50d0527db9f0b94fb02f28519d75423f537d44dad5" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.975240 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.977675 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rh66n" event={"ID":"5ac9258d-ea9b-4018-bb50-2767a4aabfd2","Type":"ContainerDied","Data":"2be918683069d825582d63defd7f0ef263fb3c4b0e9c37da96c95a4788f2d3a7"} Jan 23 11:11:45 crc kubenswrapper[4689]: I0123 11:11:45.977715 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2be918683069d825582d63defd7f0ef263fb3c4b0e9c37da96c95a4788f2d3a7" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:45.999965 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:45.999992 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d584c\" (UniqueName: \"kubernetes.io/projected/2a3222e1-02e3-44e3-ad01-ae2f033ceac3-kube-api-access-d584c\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.000812 4689 generic.go:334] "Generic (PLEG): container finished" podID="287c61f1-ab32-444f-a868-7f522f2a4916" containerID="bfb1a1bc7ca07e3d7df7950723940e4b100dbaa2dc5fdb7419862d4cc47d96f8" exitCode=0 Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.000976 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerDied","Data":"bfb1a1bc7ca07e3d7df7950723940e4b100dbaa2dc5fdb7419862d4cc47d96f8"} Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.001005 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4f4r" 
event={"ID":"287c61f1-ab32-444f-a868-7f522f2a4916","Type":"ContainerDied","Data":"b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5"} Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.001018 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b429db6ec2495fb2e0dd7ff7ce59dbe51baa4aeff86be6f21ace6a213830a5a5" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.055338 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.071726 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.072544 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.889344392 podStartE2EDuration="1m0.072524702s" podCreationTimestamp="2026-01-23 11:10:46 +0000 UTC" firstStartedPulling="2026-01-23 11:11:20.075291525 +0000 UTC m=+1344.699971384" lastFinishedPulling="2026-01-23 11:11:42.258471845 +0000 UTC m=+1366.883151694" observedRunningTime="2026-01-23 11:11:46.061344288 +0000 UTC m=+1370.686024157" watchObservedRunningTime="2026-01-23 11:11:46.072524702 +0000 UTC m=+1370.697204551" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.083822 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.102467 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts\") pod \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.102516 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts\") pod \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.102559 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv9dq\" (UniqueName: \"kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq\") pod \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\" (UID: \"3dcba22c-2c98-4d8a-91c6-ce572778f1cb\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.102768 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8khtv\" (UniqueName: \"kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv\") pod \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\" (UID: \"98a0462a-471a-4ed9-90a6-0e185f70b2bb\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.104623 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "98a0462a-471a-4ed9-90a6-0e185f70b2bb" (UID: "98a0462a-471a-4ed9-90a6-0e185f70b2bb"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.105042 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3dcba22c-2c98-4d8a-91c6-ce572778f1cb" (UID: "3dcba22c-2c98-4d8a-91c6-ce572778f1cb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.177719 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv" (OuterVolumeSpecName: "kube-api-access-8khtv") pod "98a0462a-471a-4ed9-90a6-0e185f70b2bb" (UID: "98a0462a-471a-4ed9-90a6-0e185f70b2bb"). InnerVolumeSpecName "kube-api-access-8khtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.178627 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq" (OuterVolumeSpecName: "kube-api-access-jv9dq") pod "3dcba22c-2c98-4d8a-91c6-ce572778f1cb" (UID: "3dcba22c-2c98-4d8a-91c6-ce572778f1cb"). InnerVolumeSpecName "kube-api-access-jv9dq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.204843 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts\") pod \"18e705d3-b57f-46e9-9c58-4a889bb49638\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.205359 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "18e705d3-b57f-46e9-9c58-4a889bb49638" (UID: "18e705d3-b57f-46e9-9c58-4a889bb49638"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206051 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities\") pod \"287c61f1-ab32-444f-a868-7f522f2a4916\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206110 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8s82\" (UniqueName: \"kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82\") pod \"18e705d3-b57f-46e9-9c58-4a889bb49638\" (UID: \"18e705d3-b57f-46e9-9c58-4a889bb49638\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206134 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmnxn\" (UniqueName: \"kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn\") pod \"287c61f1-ab32-444f-a868-7f522f2a4916\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206201 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n8kf\" (UniqueName: \"kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf\") pod \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206220 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content\") pod \"287c61f1-ab32-444f-a868-7f522f2a4916\" (UID: \"287c61f1-ab32-444f-a868-7f522f2a4916\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206312 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts\") pod \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\" (UID: \"5ac9258d-ea9b-4018-bb50-2767a4aabfd2\") " Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206823 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98a0462a-471a-4ed9-90a6-0e185f70b2bb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206843 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206852 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv9dq\" (UniqueName: \"kubernetes.io/projected/3dcba22c-2c98-4d8a-91c6-ce572778f1cb-kube-api-access-jv9dq\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206863 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18e705d3-b57f-46e9-9c58-4a889bb49638-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.206872 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8khtv\" (UniqueName: 
\"kubernetes.io/projected/98a0462a-471a-4ed9-90a6-0e185f70b2bb-kube-api-access-8khtv\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.207500 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5ac9258d-ea9b-4018-bb50-2767a4aabfd2" (UID: "5ac9258d-ea9b-4018-bb50-2767a4aabfd2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.208119 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities" (OuterVolumeSpecName: "utilities") pod "287c61f1-ab32-444f-a868-7f522f2a4916" (UID: "287c61f1-ab32-444f-a868-7f522f2a4916"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.211796 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf" (OuterVolumeSpecName: "kube-api-access-7n8kf") pod "5ac9258d-ea9b-4018-bb50-2767a4aabfd2" (UID: "5ac9258d-ea9b-4018-bb50-2767a4aabfd2"). InnerVolumeSpecName "kube-api-access-7n8kf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.211830 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82" (OuterVolumeSpecName: "kube-api-access-r8s82") pod "18e705d3-b57f-46e9-9c58-4a889bb49638" (UID: "18e705d3-b57f-46e9-9c58-4a889bb49638"). InnerVolumeSpecName "kube-api-access-r8s82". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.212063 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn" (OuterVolumeSpecName: "kube-api-access-rmnxn") pod "287c61f1-ab32-444f-a868-7f522f2a4916" (UID: "287c61f1-ab32-444f-a868-7f522f2a4916"). InnerVolumeSpecName "kube-api-access-rmnxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.229071 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "287c61f1-ab32-444f-a868-7f522f2a4916" (UID: "287c61f1-ab32-444f-a868-7f522f2a4916"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309608 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309640 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309650 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8s82\" (UniqueName: \"kubernetes.io/projected/18e705d3-b57f-46e9-9c58-4a889bb49638-kube-api-access-r8s82\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309661 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmnxn\" (UniqueName: \"kubernetes.io/projected/287c61f1-ab32-444f-a868-7f522f2a4916-kube-api-access-rmnxn\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309686 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n8kf\" (UniqueName: \"kubernetes.io/projected/5ac9258d-ea9b-4018-bb50-2767a4aabfd2-kube-api-access-7n8kf\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.309694 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/287c61f1-ab32-444f-a868-7f522f2a4916-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.377689 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.392069 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a3222e1-02e3-44e3-ad01-ae2f033ceac3" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.392439 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a3222e1-02e3-44e3-ad01-ae2f033ceac3" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.392518 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="registry-server" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.392584 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="registry-server" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.392671 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dcba22c-2c98-4d8a-91c6-ce572778f1cb" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.392726 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dcba22c-2c98-4d8a-91c6-ce572778f1cb" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.392816 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac9258d-ea9b-4018-bb50-2767a4aabfd2" containerName="mariadb-database-create" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.392877 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac9258d-ea9b-4018-bb50-2767a4aabfd2" containerName="mariadb-database-create" Jan 23 11:11:46 crc 
kubenswrapper[4689]: E0123 11:11:46.392946 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="extract-utilities" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393015 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="extract-utilities" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.393103 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98a0462a-471a-4ed9-90a6-0e185f70b2bb" containerName="mariadb-database-create" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393185 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="98a0462a-471a-4ed9-90a6-0e185f70b2bb" containerName="mariadb-database-create" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.393253 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="extract-content" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393309 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="extract-content" Jan 23 11:11:46 crc kubenswrapper[4689]: E0123 11:11:46.393388 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18e705d3-b57f-46e9-9c58-4a889bb49638" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393454 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="18e705d3-b57f-46e9-9c58-4a889bb49638" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393767 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="98a0462a-471a-4ed9-90a6-0e185f70b2bb" containerName="mariadb-database-create" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393846 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dcba22c-2c98-4d8a-91c6-ce572778f1cb" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393913 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="18e705d3-b57f-46e9-9c58-4a889bb49638" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.393980 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" containerName="registry-server" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.394039 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a3222e1-02e3-44e3-ad01-ae2f033ceac3" containerName="mariadb-account-create-update" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.394100 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac9258d-ea9b-4018-bb50-2767a4aabfd2" containerName="mariadb-database-create" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.395228 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.395389 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.398371 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.516638 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp762\" (UniqueName: \"kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.516956 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.517140 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.517521 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.517637 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.517805 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.620687 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621021 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621060 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621165 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621283 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp762\" (UniqueName: \"kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621454 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.621768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.622224 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.622488 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.622656 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.622854 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.641265 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp762\" (UniqueName: 
\"kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762\") pod \"dnsmasq-dns-6d5b6d6b67-w4bpf\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:46 crc kubenswrapper[4689]: I0123 11:11:46.698100 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015030 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fd90-account-create-update-mb96s" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015560 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a74-account-create-update-fzj69" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015566 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"5603271eae588ad5ea1982114ce847f2170824bae35346bd9efbca45a7fbc652"} Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015735 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4f4r" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015755 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-4gf6b" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.015910 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rh66n" Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.159343 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"] Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.172501 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4f4r"] Jan 23 11:11:47 crc kubenswrapper[4689]: I0123 11:11:47.654883 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="287c61f1-ab32-444f-a868-7f522f2a4916" path="/var/lib/kubelet/pods/287c61f1-ab32-444f-a868-7f522f2a4916/volumes" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.037450 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-r9t2h" event={"ID":"cc3173d9-5b9e-47fc-a5aa-3a260297c997","Type":"ContainerDied","Data":"247f2be37bd3b07f34b619163b769f14ba6e0775da60ac4fcd844cfc51621594"} Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.038248 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="247f2be37bd3b07f34b619163b769f14ba6e0775da60ac4fcd844cfc51621594" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.043441 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e9e9-account-create-update-5zvgd" event={"ID":"db4a1b6f-833b-4ef8-a55b-707d4a135fdb","Type":"ContainerDied","Data":"c0ee31bd094017f039bba656a852b0cf6877bce437a7f546ccc762f1bfc9359f"} Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.043496 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0ee31bd094017f039bba656a852b0cf6877bce437a7f546ccc762f1bfc9359f" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.050504 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fklps" 
event={"ID":"1744ca79-20e6-40d0-8d71-699e88c7013d","Type":"ContainerDied","Data":"fc4e35b3821cf7f1f3e640a0a0c99309da116dfa0d05ef5fd4651a94abce00c8"} Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.050539 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc4e35b3821cf7f1f3e640a0a0c99309da116dfa0d05ef5fd4651a94abce00c8" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.258353 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fklps" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.283025 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.325589 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.381664 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts\") pod \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.381783 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts\") pod \"1744ca79-20e6-40d0-8d71-699e88c7013d\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.381873 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzg66\" (UniqueName: \"kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66\") pod \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.381904 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7dbf\" (UniqueName: \"kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf\") pod \"1744ca79-20e6-40d0-8d71-699e88c7013d\" (UID: \"1744ca79-20e6-40d0-8d71-699e88c7013d\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.381949 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8npc5\" (UniqueName: \"kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5\") pod \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\" (UID: \"db4a1b6f-833b-4ef8-a55b-707d4a135fdb\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.382125 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts\") pod \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\" (UID: \"cc3173d9-5b9e-47fc-a5aa-3a260297c997\") " Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.384370 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc3173d9-5b9e-47fc-a5aa-3a260297c997" (UID: "cc3173d9-5b9e-47fc-a5aa-3a260297c997"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.384854 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "db4a1b6f-833b-4ef8-a55b-707d4a135fdb" (UID: "db4a1b6f-833b-4ef8-a55b-707d4a135fdb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.386379 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1744ca79-20e6-40d0-8d71-699e88c7013d" (UID: "1744ca79-20e6-40d0-8d71-699e88c7013d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.391879 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf" (OuterVolumeSpecName: "kube-api-access-w7dbf") pod "1744ca79-20e6-40d0-8d71-699e88c7013d" (UID: "1744ca79-20e6-40d0-8d71-699e88c7013d"). InnerVolumeSpecName "kube-api-access-w7dbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.392540 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66" (OuterVolumeSpecName: "kube-api-access-xzg66") pod "cc3173d9-5b9e-47fc-a5aa-3a260297c997" (UID: "cc3173d9-5b9e-47fc-a5aa-3a260297c997"). InnerVolumeSpecName "kube-api-access-xzg66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.396260 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5" (OuterVolumeSpecName: "kube-api-access-8npc5") pod "db4a1b6f-833b-4ef8-a55b-707d4a135fdb" (UID: "db4a1b6f-833b-4ef8-a55b-707d4a135fdb"). InnerVolumeSpecName "kube-api-access-8npc5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484434 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzg66\" (UniqueName: \"kubernetes.io/projected/cc3173d9-5b9e-47fc-a5aa-3a260297c997-kube-api-access-xzg66\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484468 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7dbf\" (UniqueName: \"kubernetes.io/projected/1744ca79-20e6-40d0-8d71-699e88c7013d-kube-api-access-w7dbf\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484477 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8npc5\" (UniqueName: \"kubernetes.io/projected/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-kube-api-access-8npc5\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484487 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3173d9-5b9e-47fc-a5aa-3a260297c997-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484496 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4a1b6f-833b-4ef8-a55b-707d4a135fdb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.484505 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1744ca79-20e6-40d0-8d71-699e88c7013d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:49 crc kubenswrapper[4689]: W0123 11:11:49.588449 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31900e42_2a38_4b40_b8dc_7b8b8fb9e41e.slice/crio-f6014d2d20a7507e480a23b11bf877b2a27a9d5dfeabd4be02e14d7a549aa5c5 WatchSource:0}: Error finding container f6014d2d20a7507e480a23b11bf877b2a27a9d5dfeabd4be02e14d7a549aa5c5: Status 404 returned error can't find the container with id f6014d2d20a7507e480a23b11bf877b2a27a9d5dfeabd4be02e14d7a549aa5c5 Jan 23 11:11:49 crc kubenswrapper[4689]: I0123 11:11:49.591552 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.064438 4689 generic.go:334] "Generic (PLEG): container finished" podID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerID="702b0533a10dcfa1a60ef733324866399bbfec17ef96deb421f222ded3b043f0" exitCode=0 Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.064541 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" event={"ID":"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e","Type":"ContainerDied","Data":"702b0533a10dcfa1a60ef733324866399bbfec17ef96deb421f222ded3b043f0"} Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.064888 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" event={"ID":"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e","Type":"ContainerStarted","Data":"f6014d2d20a7507e480a23b11bf877b2a27a9d5dfeabd4be02e14d7a549aa5c5"} Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.067743 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-68w28" 
event={"ID":"eb7b2de7-60f5-4357-a6ec-21c925346c7f","Type":"ContainerStarted","Data":"415abc379391f653942dc6239ef70369f0f0988cf5396b2f475091696322fbe7"} Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.067808 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-r9t2h" Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.067845 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e9e9-account-create-update-5zvgd" Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.067860 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fklps" Jan 23 11:11:50 crc kubenswrapper[4689]: I0123 11:11:50.126290 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-68w28" podStartSLOduration=13.474620864 podStartE2EDuration="19.126264755s" podCreationTimestamp="2026-01-23 11:11:31 +0000 UTC" firstStartedPulling="2026-01-23 11:11:43.453020211 +0000 UTC m=+1368.077700070" lastFinishedPulling="2026-01-23 11:11:49.104664082 +0000 UTC m=+1373.729343961" observedRunningTime="2026-01-23 11:11:50.117313732 +0000 UTC m=+1374.741993631" watchObservedRunningTime="2026-01-23 11:11:50.126264755 +0000 UTC m=+1374.750944644" Jan 23 11:11:51 crc kubenswrapper[4689]: I0123 11:11:51.084803 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" event={"ID":"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e","Type":"ContainerStarted","Data":"8c924dffaa2402963ed5bf7f44c8e8f36c277a9b9e077faead9c3ec6554f2fe4"} Jan 23 11:11:51 crc kubenswrapper[4689]: I0123 11:11:51.115681 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" podStartSLOduration=5.115660598 podStartE2EDuration="5.115660598s" podCreationTimestamp="2026-01-23 11:11:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:51.107668266 +0000 UTC m=+1375.732348125" watchObservedRunningTime="2026-01-23 11:11:51.115660598 +0000 UTC m=+1375.740340457" Jan 23 11:11:51 crc kubenswrapper[4689]: I0123 11:11:51.698732 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:53 crc kubenswrapper[4689]: I0123 11:11:53.107838 4689 generic.go:334] "Generic (PLEG): container finished" podID="eb7b2de7-60f5-4357-a6ec-21c925346c7f" containerID="415abc379391f653942dc6239ef70369f0f0988cf5396b2f475091696322fbe7" exitCode=0 Jan 23 11:11:53 crc kubenswrapper[4689]: I0123 11:11:53.107933 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-68w28" event={"ID":"eb7b2de7-60f5-4357-a6ec-21c925346c7f","Type":"ContainerDied","Data":"415abc379391f653942dc6239ef70369f0f0988cf5396b2f475091696322fbe7"} Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.119842 4689 generic.go:334] "Generic (PLEG): container finished" podID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerID="5603271eae588ad5ea1982114ce847f2170824bae35346bd9efbca45a7fbc652" exitCode=0 Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.119955 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerDied","Data":"5603271eae588ad5ea1982114ce847f2170824bae35346bd9efbca45a7fbc652"} Jan 23 11:11:54 crc 
kubenswrapper[4689]: I0123 11:11:54.515018 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.622993 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7q9k\" (UniqueName: \"kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k\") pod \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.623058 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data\") pod \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.623163 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle\") pod \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\" (UID: \"eb7b2de7-60f5-4357-a6ec-21c925346c7f\") " Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.627599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k" (OuterVolumeSpecName: "kube-api-access-k7q9k") pod "eb7b2de7-60f5-4357-a6ec-21c925346c7f" (UID: "eb7b2de7-60f5-4357-a6ec-21c925346c7f"). InnerVolumeSpecName "kube-api-access-k7q9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.649222 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb7b2de7-60f5-4357-a6ec-21c925346c7f" (UID: "eb7b2de7-60f5-4357-a6ec-21c925346c7f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.674487 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data" (OuterVolumeSpecName: "config-data") pod "eb7b2de7-60f5-4357-a6ec-21c925346c7f" (UID: "eb7b2de7-60f5-4357-a6ec-21c925346c7f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.733222 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7q9k\" (UniqueName: \"kubernetes.io/projected/eb7b2de7-60f5-4357-a6ec-21c925346c7f-kube-api-access-k7q9k\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.733258 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:54 crc kubenswrapper[4689]: I0123 11:11:54.733272 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb7b2de7-60f5-4357-a6ec-21c925346c7f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.134455 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"ca718988dcb3cd7101ef11ad17e6b71775bccaa96d02175dba36c85f9a2cc7f7"} Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.139417 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-68w28" event={"ID":"eb7b2de7-60f5-4357-a6ec-21c925346c7f","Type":"ContainerDied","Data":"ddae5ed764c8e85ff13fa05ea973c2c9cc99f65c2ea6ea186073e75941ca0992"} Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.139465 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddae5ed764c8e85ff13fa05ea973c2c9cc99f65c2ea6ea186073e75941ca0992" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.139527 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-68w28" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.431069 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-pwmcq"] Jan 23 11:11:55 crc kubenswrapper[4689]: E0123 11:11:55.432688 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db4a1b6f-833b-4ef8-a55b-707d4a135fdb" containerName="mariadb-account-create-update" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.432735 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="db4a1b6f-833b-4ef8-a55b-707d4a135fdb" containerName="mariadb-account-create-update" Jan 23 11:11:55 crc kubenswrapper[4689]: E0123 11:11:55.432821 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1744ca79-20e6-40d0-8d71-699e88c7013d" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.432831 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1744ca79-20e6-40d0-8d71-699e88c7013d" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: E0123 11:11:55.432856 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3173d9-5b9e-47fc-a5aa-3a260297c997" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.432865 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3173d9-5b9e-47fc-a5aa-3a260297c997" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: E0123 11:11:55.432905 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb7b2de7-60f5-4357-a6ec-21c925346c7f" containerName="keystone-db-sync" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.432916 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb7b2de7-60f5-4357-a6ec-21c925346c7f" containerName="keystone-db-sync" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.433549 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="1744ca79-20e6-40d0-8d71-699e88c7013d" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.433575 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb7b2de7-60f5-4357-a6ec-21c925346c7f" containerName="keystone-db-sync" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.433620 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc3173d9-5b9e-47fc-a5aa-3a260297c997" containerName="mariadb-database-create" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.433638 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="db4a1b6f-833b-4ef8-a55b-707d4a135fdb" containerName="mariadb-account-create-update" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.435697 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.443073 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.443190 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.443526 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.443748 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jvx7x" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.451546 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.473565 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.473820 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="dnsmasq-dns" containerID="cri-o://8c924dffaa2402963ed5bf7f44c8e8f36c277a9b9e077faead9c3ec6554f2fe4" gracePeriod=10 Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.525102 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.531175 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pwmcq"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554003 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554056 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554159 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk97c\" (UniqueName: \"kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554214 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554251 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.554291 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.569769 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.571687 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.616274 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663328 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663432 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgvgr\" (UniqueName: \"kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663505 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk97c\" (UniqueName: \"kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663724 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663749 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663867 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" 
Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.663899 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.667763 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.667955 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.668100 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.668182 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.668262 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.677734 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.677785 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.677887 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.690504 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.696978 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.700215 
4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.707617 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.710160 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-bvhc5"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.711472 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.715133 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk97c\" (UniqueName: \"kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.718935 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.719107 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-5trhz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.723874 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bvhc5"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.727810 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys\") pod \"keystone-bootstrap-pwmcq\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.786871 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.786956 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.786982 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787022 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-spk7c\" (UniqueName: \"kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787200 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787242 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787268 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787329 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.787401 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgvgr\" (UniqueName: \"kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.788879 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.789677 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.792318 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.792563 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb\") pod 
\"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.793270 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.801052 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jvx7x" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.803954 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.833790 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-b6958"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.835172 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.841387 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.841895 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.847351 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-b6958"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.847574 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h7trg" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.860493 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgvgr\" (UniqueName: \"kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr\") pod \"dnsmasq-dns-6f8c45789f-hvkqz\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.895398 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.895444 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.895554 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spk7c\" (UniqueName: \"kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.897649 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-lr5lm"] Jan 23 11:11:55 crc 
kubenswrapper[4689]: I0123 11:11:55.899073 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.903716 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.904337 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-jps47" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.904488 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.912903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.917786 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-cn9tn"] Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.919158 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.924062 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.924581 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9sfnl" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.924823 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.945140 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spk7c\" (UniqueName: \"kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c\") pod \"heat-db-sync-bvhc5\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:55 crc kubenswrapper[4689]: I0123 11:11:55.951794 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lr5lm"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999274 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999397 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999586 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999618 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfgwf\" (UniqueName: \"kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999644 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999729 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999745 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8fh4\" (UniqueName: \"kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999839 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:55.999866 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.000311 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.000663 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbg45\" (UniqueName: \"kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.000695 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102343 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102441 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbg45\" (UniqueName: \"kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102484 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102579 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102612 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102684 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102712 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfgwf\" (UniqueName: \"kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102739 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102793 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id\") pod \"cinder-db-sync-lr5lm\" (UID: 
\"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102815 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8fh4\" (UniqueName: \"kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.102905 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.103280 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.136838 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.170312 4689 generic.go:334] "Generic (PLEG): container finished" podID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerID="8c924dffaa2402963ed5bf7f44c8e8f36c277a9b9e077faead9c3ec6554f2fe4" exitCode=0 Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.170366 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" event={"ID":"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e","Type":"ContainerDied","Data":"8c924dffaa2402963ed5bf7f44c8e8f36c277a9b9e077faead9c3ec6554f2fe4"} Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.191673 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.191768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbg45\" (UniqueName: \"kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.191981 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.192851 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.194085 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.197090 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.197745 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.198436 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8fh4\" (UniqueName: \"kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4\") pod \"neutron-db-sync-b6958\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.199327 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfgwf\" (UniqueName: \"kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.199535 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts\") pod \"cinder-db-sync-lr5lm\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.199975 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data\") pod \"barbican-db-sync-cn9tn\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.231556 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bvhc5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.248963 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-cn9tn"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.262919 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-b6958" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.273294 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.275653 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.280387 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.282963 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.283379 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.285658 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.301440 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.377339 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418231 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418574 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k86fs\" (UniqueName: \"kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418603 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418671 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418734 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418752 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.418774 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.434409 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.436281 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.467198 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.507552 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-hm599"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.509073 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.512503 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.512559 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-fgc9f" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.512674 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.520948 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521009 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521033 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521072 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521121 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq7pp\" (UniqueName: \"kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521170 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521213 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k86fs\" (UniqueName: \"kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521235 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521255 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521292 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521349 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.521827 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.526412 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hm599"] Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.536763 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.583089 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.583223 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k86fs\" (UniqueName: \"kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.583820 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.584398 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.584691 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " pod="openstack/ceilometer-0" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627560 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627654 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627712 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627749 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " 
pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627808 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.627946 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.628008 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.628054 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nds7\" (UniqueName: \"kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.628089 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.628120 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.628313 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq7pp\" (UniqueName: \"kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.629837 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.630348 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:11:56 crc 
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.630920 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.636650 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.637200 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.654456 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq7pp\" (UniqueName: \"kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp\") pod \"dnsmasq-dns-fcfdd6f9f-6cst5\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.697884 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.698923 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.176:5353: connect: connection refused"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.733926 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nds7\" (UniqueName: \"kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.743667 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.743696 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.743891 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.743983 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.744368 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.745304 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.747619 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.747860 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.761709 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nds7\" (UniqueName: \"kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7\") pod \"placement-db-sync-hm599\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " pod="openstack/placement-db-sync-hm599"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.900877 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5"
Jan 23 11:11:56 crc kubenswrapper[4689]: I0123 11:11:56.929928 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hm599"
Need to start a new one" pod="openstack/placement-db-sync-hm599" Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.039015 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pwmcq"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.055468 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.182023 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pwmcq" event={"ID":"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341","Type":"ContainerStarted","Data":"f689d580735c419812c71858e133c6237e98430e1669900391e00d3bd8c91ba2"} Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.509849 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bvhc5"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.533361 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lr5lm"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.549966 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-b6958"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.574799 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.659073 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.687851 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.724659 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-cn9tn"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.898897 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hm599"] Jan 23 11:11:57 crc kubenswrapper[4689]: I0123 11:11:57.932884 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.314433 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"956b94bfb578bbb7610d5879d8ecc50c1c9a71b3da416406adf302b9686aad74"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.320061 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hm599" event={"ID":"a5e5ddbf-b676-44a1-996e-a6aafe2280e5","Type":"ContainerStarted","Data":"39cafe709aa278dc40bb68f17fa6f983509b5b09a1eb738254032faa1ba12a29"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.327516 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-b6958" event={"ID":"c1455952-eacb-400c-bb65-d9d6ca95a674","Type":"ContainerStarted","Data":"390a1755d4c44f561a50ea749baa8e36f0681bd1a5e7f51c9c5511eddbf23124"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.327591 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-b6958" event={"ID":"c1455952-eacb-400c-bb65-d9d6ca95a674","Type":"ContainerStarted","Data":"0d382be03615399c2ccb8d7d847deeae7615701e18b92a45e1b9c728a5e30195"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.333747 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pwmcq" 
event={"ID":"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341","Type":"ContainerStarted","Data":"b62357a0edaa657342ce222555b749c18cdd4c1de955d1d28e60f15ab02b2887"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.336960 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerStarted","Data":"205b59abdc76e7a045d76c80c3c580754699be698371b968e4a968e33181c7a6"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.355757 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-b6958" podStartSLOduration=3.355738767 podStartE2EDuration="3.355738767s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:58.344161985 +0000 UTC m=+1382.968841834" watchObservedRunningTime="2026-01-23 11:11:58.355738767 +0000 UTC m=+1382.980418626" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.357262 4689 generic.go:334] "Generic (PLEG): container finished" podID="f071329d-803d-4cd2-b338-1fc5743c89ff" containerID="e25213be59b1da2aba1ad933a3495bb124cac1187b26ba171377c97355ea783b" exitCode=0 Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.357363 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" event={"ID":"f071329d-803d-4cd2-b338-1fc5743c89ff","Type":"ContainerDied","Data":"e25213be59b1da2aba1ad933a3495bb124cac1187b26ba171377c97355ea783b"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.357443 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" event={"ID":"f071329d-803d-4cd2-b338-1fc5743c89ff","Type":"ContainerStarted","Data":"8b5badf03e63c5f90177b898c88b8cfdca8607be8336f37c58712a73d0419194"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.365594 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-pwmcq" podStartSLOduration=3.365573271 podStartE2EDuration="3.365573271s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:58.363917033 +0000 UTC m=+1382.988596892" watchObservedRunningTime="2026-01-23 11:11:58.365573271 +0000 UTC m=+1382.990253120" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.365630 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cn9tn" event={"ID":"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7","Type":"ContainerStarted","Data":"27166c1837adbdc89c5481c475709f8ee52c3b8e7825b024b01591831f702fd0"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.370537 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bvhc5" event={"ID":"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4","Type":"ContainerStarted","Data":"b1097d8c29abc6ca4cf5302a3741fbd676541e0275f73f48ba198076edcb83cf"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.374875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr5lm" event={"ID":"b74dafce-64c6-4c46-886b-bdc4044b9b1e","Type":"ContainerStarted","Data":"2cd40e67188d5ff4fbdef8ddeb34c96a45f8b2c7ddda2c4418dbe14167aab339"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.393212 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" 
event={"ID":"22d13f4a-d514-4739-b9f5-bcb6107dd167","Type":"ContainerStarted","Data":"a552a2bb3291f1962a7d05ebcca3aa2d13ee141af183eec3a0a9196154e8af49"} Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.589132 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.735690 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.737456 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.737629 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.737665 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp762\" (UniqueName: \"kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.737707 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.737741 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb\") pod \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\" (UID: \"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e\") " Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.817837 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762" (OuterVolumeSpecName: "kube-api-access-qp762") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "kube-api-access-qp762". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.841304 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp762\" (UniqueName: \"kubernetes.io/projected/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-kube-api-access-qp762\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.860769 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.903635 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.911113 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.916494 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.916810 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.944462 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.944694 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.944768 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.944840 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:58 crc kubenswrapper[4689]: I0123 11:11:58.955978 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config" (OuterVolumeSpecName: "config") pod "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" (UID: "31900e42-2a38-4b40-b8dc-7b8b8fb9e41e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046042 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046121 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046191 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046222 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgvgr\" (UniqueName: \"kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046507 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.046570 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb\") pod \"f071329d-803d-4cd2-b338-1fc5743c89ff\" (UID: \"f071329d-803d-4cd2-b338-1fc5743c89ff\") " Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.047138 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.065453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr" (OuterVolumeSpecName: "kube-api-access-jgvgr") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "kube-api-access-jgvgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.091869 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.092448 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config" (OuterVolumeSpecName: "config") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.096437 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.112451 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.117996 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f071329d-803d-4cd2-b338-1fc5743c89ff" (UID: "f071329d-803d-4cd2-b338-1fc5743c89ff"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.149927 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.149963 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgvgr\" (UniqueName: \"kubernetes.io/projected/f071329d-803d-4cd2-b338-1fc5743c89ff-kube-api-access-jgvgr\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.149976 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.149985 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.149993 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.150012 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f071329d-803d-4cd2-b338-1fc5743c89ff-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.413392 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"aee275e51e671a048edcf0ebe613e4539c60e8bd290ec227b3ae0bf496a604d2"} Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.430908 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" event={"ID":"31900e42-2a38-4b40-b8dc-7b8b8fb9e41e","Type":"ContainerDied","Data":"f6014d2d20a7507e480a23b11bf877b2a27a9d5dfeabd4be02e14d7a549aa5c5"} Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.431026 4689 scope.go:117] "RemoveContainer" containerID="8c924dffaa2402963ed5bf7f44c8e8f36c277a9b9e077faead9c3ec6554f2fe4" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.431410 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-w4bpf" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.447398 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" event={"ID":"f071329d-803d-4cd2-b338-1fc5743c89ff","Type":"ContainerDied","Data":"8b5badf03e63c5f90177b898c88b8cfdca8607be8336f37c58712a73d0419194"} Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.447655 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-hvkqz" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.457643 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=31.457621282 podStartE2EDuration="31.457621282s" podCreationTimestamp="2026-01-23 11:11:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:11:59.43636481 +0000 UTC m=+1384.061044679" watchObservedRunningTime="2026-01-23 11:11:59.457621282 +0000 UTC m=+1384.082301141" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.460650 4689 generic.go:334] "Generic (PLEG): container finished" podID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerID="f79513ae169b10b27112e9812f17dda5f5b7543441d510a85d6d44d0fc0a69ee" exitCode=0 Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.460852 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" event={"ID":"22d13f4a-d514-4739-b9f5-bcb6107dd167","Type":"ContainerDied","Data":"f79513ae169b10b27112e9812f17dda5f5b7543441d510a85d6d44d0fc0a69ee"} Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.659339 4689 scope.go:117] "RemoveContainer" containerID="702b0533a10dcfa1a60ef733324866399bbfec17ef96deb421f222ded3b043f0" Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.732740 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.749219 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-hvkqz"] Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.766950 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.775281 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-w4bpf"] Jan 23 11:11:59 crc kubenswrapper[4689]: I0123 11:11:59.789518 4689 scope.go:117] "RemoveContainer" containerID="e25213be59b1da2aba1ad933a3495bb124cac1187b26ba171377c97355ea783b" Jan 23 11:12:00 crc kubenswrapper[4689]: I0123 11:12:00.473426 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" event={"ID":"22d13f4a-d514-4739-b9f5-bcb6107dd167","Type":"ContainerStarted","Data":"5b1d89085b6b85019773ef11b65d013490b50db90cd1df1df31f86d6996dd269"} Jan 23 11:12:00 crc kubenswrapper[4689]: I0123 11:12:00.473943 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:12:01 crc kubenswrapper[4689]: I0123 11:12:01.491074 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5tlkt" event={"ID":"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3","Type":"ContainerStarted","Data":"3f1a66de677ef8d25a2a9e01ba012731015e2c73227088db8d19f5312fd16bb5"} Jan 23 11:12:01 crc kubenswrapper[4689]: I0123 11:12:01.507047 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-5tlkt" podStartSLOduration=4.045969035 podStartE2EDuration="42.50703201s" podCreationTimestamp="2026-01-23 11:11:19 +0000 UTC" firstStartedPulling="2026-01-23 11:11:20.897845658 +0000 UTC m=+1345.522525517" lastFinishedPulling="2026-01-23 11:11:59.358908633 +0000 UTC m=+1383.983588492" observedRunningTime="2026-01-23 11:12:01.505753181 +0000 UTC m=+1386.130433040" 
watchObservedRunningTime="2026-01-23 11:12:01.50703201 +0000 UTC m=+1386.131711869" Jan 23 11:12:01 crc kubenswrapper[4689]: I0123 11:12:01.511066 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" podStartSLOduration=6.511049401 podStartE2EDuration="6.511049401s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:00.493467229 +0000 UTC m=+1385.118147088" watchObservedRunningTime="2026-01-23 11:12:01.511049401 +0000 UTC m=+1386.135729260" Jan 23 11:12:01 crc kubenswrapper[4689]: I0123 11:12:01.656460 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" path="/var/lib/kubelet/pods/31900e42-2a38-4b40-b8dc-7b8b8fb9e41e/volumes" Jan 23 11:12:01 crc kubenswrapper[4689]: I0123 11:12:01.657190 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f071329d-803d-4cd2-b338-1fc5743c89ff" path="/var/lib/kubelet/pods/f071329d-803d-4cd2-b338-1fc5743c89ff/volumes" Jan 23 11:12:03 crc kubenswrapper[4689]: I0123 11:12:03.514486 4689 generic.go:334] "Generic (PLEG): container finished" podID="bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" containerID="b62357a0edaa657342ce222555b749c18cdd4c1de955d1d28e60f15ab02b2887" exitCode=0 Jan 23 11:12:03 crc kubenswrapper[4689]: I0123 11:12:03.514531 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pwmcq" event={"ID":"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341","Type":"ContainerDied","Data":"b62357a0edaa657342ce222555b749c18cdd4c1de955d1d28e60f15ab02b2887"} Jan 23 11:12:03 crc kubenswrapper[4689]: I0123 11:12:03.944458 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 23 11:12:06 crc kubenswrapper[4689]: I0123 11:12:06.903431 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:12:06 crc kubenswrapper[4689]: I0123 11:12:06.968564 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:12:06 crc kubenswrapper[4689]: I0123 11:12:06.968804 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" containerID="cri-o://ffb58886e789bcb1fe7c5aadfffcb241d0474c7187b7c9defa41caf9384dbea4" gracePeriod=10 Jan 23 11:12:07 crc kubenswrapper[4689]: I0123 11:12:07.567250 4689 generic.go:334] "Generic (PLEG): container finished" podID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerID="ffb58886e789bcb1fe7c5aadfffcb241d0474c7187b7c9defa41caf9384dbea4" exitCode=0 Jan 23 11:12:07 crc kubenswrapper[4689]: I0123 11:12:07.567313 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" event={"ID":"407dd7c8-3c4f-4345-a26e-29f59646bf5e","Type":"ContainerDied","Data":"ffb58886e789bcb1fe7c5aadfffcb241d0474c7187b7c9defa41caf9384dbea4"} Jan 23 11:12:11 crc kubenswrapper[4689]: I0123 11:12:11.397604 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: connect: connection refused" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.776555 4689 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799264 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799385 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799414 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk97c\" (UniqueName: \"kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799436 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799500 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.799541 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts\") pod \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\" (UID: \"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341\") " Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.806126 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c" (OuterVolumeSpecName: "kube-api-access-nk97c") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "kube-api-access-nk97c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.806548 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts" (OuterVolumeSpecName: "scripts") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.806946 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.816692 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.833415 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data" (OuterVolumeSpecName: "config-data") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.845039 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" (UID: "bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.902950 4689 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.902984 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk97c\" (UniqueName: \"kubernetes.io/projected/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-kube-api-access-nk97c\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.902996 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.903004 4689 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.903012 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:12 crc kubenswrapper[4689]: I0123 11:12:12.903020 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.054393 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.054591 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-spk7c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-bvhc5_openstack(47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.055834 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-bvhc5" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.654052 4689 util.go:48] "No ready sandbox for pod can be found. 
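[annotation] The "Unhandled Error" entry above dumps the failing container spec as a one-line Go struct literal (&Container{...}); the subsequent pod_workers lines show the pull failure surfacing first as ErrImagePull and then as ImagePullBackOff. Restated in readable form with the k8s.io/api types, using a subset of the fields from the dump (volume mounts elided for brevity; the pointer helpers are local conveniences, not library functions):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func int64Ptr(v int64) *int64 { return &v }
func boolPtr(b bool) *bool    { return &b }

func main() {
	// heatDBSync restates the container spec dumped in the ErrImagePull
	// entry for heat-db-sync-bvhc5; values are copied from the log line.
	heatDBSync := corev1.Container{
		Name:    "heat-db-sync",
		Image:   "quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified",
		Command: []string{"/bin/bash"},
		Args:    []string{"-c", "/usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync"},
		Env: []corev1.EnvVar{
			{Name: "KOLLA_BOOTSTRAP", Value: "true"},
			{Name: "KOLLA_CONFIG_STRATEGY", Value: "COPY_ALWAYS"},
		},
		ImagePullPolicy: corev1.PullIfNotPresent,
		SecurityContext: &corev1.SecurityContext{
			Capabilities: &corev1.Capabilities{
				Drop: []corev1.Capability{"ALL", "MKNOD"},
			},
			RunAsUser:                int64Ptr(42418),
			RunAsGroup:               int64Ptr(42418),
			RunAsNonRoot:             boolPtr(true),
			AllowPrivilegeEscalation: boolPtr(false),
		},
	}
	fmt.Println(heatDBSync.Name, heatDBSync.Image)
}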
Need to start a new one" pod="openstack/keystone-bootstrap-pwmcq" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.659082 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-bvhc5" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.660275 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pwmcq" event={"ID":"bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341","Type":"ContainerDied","Data":"f689d580735c419812c71858e133c6237e98430e1669900391e00d3bd8c91ba2"} Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.660314 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f689d580735c419812c71858e133c6237e98430e1669900391e00d3bd8c91ba2" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.857349 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-pwmcq"] Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.866877 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-pwmcq"] Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.944625 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.954725 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.963463 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8nxsm"] Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.964030 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="dnsmasq-dns" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964057 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="dnsmasq-dns" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.964080 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" containerName="keystone-bootstrap" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964088 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" containerName="keystone-bootstrap" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.964114 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f071329d-803d-4cd2-b338-1fc5743c89ff" containerName="init" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964123 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f071329d-803d-4cd2-b338-1fc5743c89ff" containerName="init" Jan 23 11:12:13 crc kubenswrapper[4689]: E0123 11:12:13.964168 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="init" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964178 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="init" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964400 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f071329d-803d-4cd2-b338-1fc5743c89ff" containerName="init" 
Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964421 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" containerName="keystone-bootstrap" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.964428 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="31900e42-2a38-4b40-b8dc-7b8b8fb9e41e" containerName="dnsmasq-dns" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.965181 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.967328 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jvx7x" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.967537 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.967686 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.969038 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.974452 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8nxsm"] Jan 23 11:12:13 crc kubenswrapper[4689]: I0123 11:12:13.977600 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.030921 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.031551 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.031748 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.031792 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.032006 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2n55\" (UniqueName: \"kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " 
pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.032067 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133488 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133532 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133585 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2n55\" (UniqueName: \"kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133613 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133709 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.133794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.138387 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.138410 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.139238 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.139289 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.141301 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.157700 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2n55\" (UniqueName: \"kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55\") pod \"keystone-bootstrap-8nxsm\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.326054 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.666108 4689 generic.go:334] "Generic (PLEG): container finished" podID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" containerID="3f1a66de677ef8d25a2a9e01ba012731015e2c73227088db8d19f5312fd16bb5" exitCode=0 Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.666212 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5tlkt" event={"ID":"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3","Type":"ContainerDied","Data":"3f1a66de677ef8d25a2a9e01ba012731015e2c73227088db8d19f5312fd16bb5"} Jan 23 11:12:14 crc kubenswrapper[4689]: I0123 11:12:14.671499 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 23 11:12:15 crc kubenswrapper[4689]: I0123 11:12:15.655016 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341" path="/var/lib/kubelet/pods/bb4ac6a8-dc7f-4d27-8a2a-1c4f8546d341/volumes" Jan 23 11:12:21 crc kubenswrapper[4689]: I0123 11:12:21.398564 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Jan 23 11:12:23 crc kubenswrapper[4689]: E0123 11:12:23.145398 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Jan 23 11:12:23 crc kubenswrapper[4689]: E0123 11:12:23.145995 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n677h7bhf6h69h57hb7h58h65fh59dh6dh548h5dbhd6h56dh5f8h67fh5f7h654h578h68h79h668h55h57h59bhf9h99hc6h5f7hfdh679hbfq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k86fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(82880b13-2692-46b7-a0b0-fb06b87426dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.433937 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.434578 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lbg45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-lr5lm_openstack(b74dafce-64c6-4c46-886b-bdc4044b9b1e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.435847 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-lr5lm" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.780214 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-lr5lm" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.863738 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.864044 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pfgwf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-cn9tn_openstack(3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:12:24 crc kubenswrapper[4689]: E0123 11:12:24.865871 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-cn9tn" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.102124 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5tlkt" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.107235 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.206714 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data\") pod \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.206804 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc\") pod \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.206862 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb\") pod \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.206898 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data\") pod \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.206966 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle\") pod \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.207035 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc2tn\" (UniqueName: \"kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn\") pod \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\" (UID: \"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.207154 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb\") pod \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.207264 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxx5j\" (UniqueName: \"kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j\") pod \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.207372 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config\") pod \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\" (UID: \"407dd7c8-3c4f-4345-a26e-29f59646bf5e\") " Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.213383 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j" (OuterVolumeSpecName: "kube-api-access-lxx5j") pod 
"407dd7c8-3c4f-4345-a26e-29f59646bf5e" (UID: "407dd7c8-3c4f-4345-a26e-29f59646bf5e"). InnerVolumeSpecName "kube-api-access-lxx5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.214419 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" (UID: "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.232888 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn" (OuterVolumeSpecName: "kube-api-access-hc2tn") pod "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" (UID: "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3"). InnerVolumeSpecName "kube-api-access-hc2tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.249869 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" (UID: "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.279958 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config" (OuterVolumeSpecName: "config") pod "407dd7c8-3c4f-4345-a26e-29f59646bf5e" (UID: "407dd7c8-3c4f-4345-a26e-29f59646bf5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.284453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "407dd7c8-3c4f-4345-a26e-29f59646bf5e" (UID: "407dd7c8-3c4f-4345-a26e-29f59646bf5e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309736 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309788 4689 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309810 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309821 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309835 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc2tn\" (UniqueName: \"kubernetes.io/projected/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-kube-api-access-hc2tn\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309846 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxx5j\" (UniqueName: \"kubernetes.io/projected/407dd7c8-3c4f-4345-a26e-29f59646bf5e-kube-api-access-lxx5j\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.309737 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8nxsm"] Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.317875 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "407dd7c8-3c4f-4345-a26e-29f59646bf5e" (UID: "407dd7c8-3c4f-4345-a26e-29f59646bf5e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.329741 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data" (OuterVolumeSpecName: "config-data") pod "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" (UID: "30a7d451-e70f-43cd-ae81-e5ccbdcb53f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.333495 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "407dd7c8-3c4f-4345-a26e-29f59646bf5e" (UID: "407dd7c8-3c4f-4345-a26e-29f59646bf5e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.411879 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.411935 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.411950 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/407dd7c8-3c4f-4345-a26e-29f59646bf5e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:25 crc kubenswrapper[4689]: W0123 11:12:25.693411 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8604732_adb6_4e50_b9a4_107ebb88d8a4.slice/crio-6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a WatchSource:0}: Error finding container 6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a: Status 404 returned error can't find the container with id 6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.789466 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nxsm" event={"ID":"a8604732-adb6-4e50-b9a4-107ebb88d8a4","Type":"ContainerStarted","Data":"6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a"} Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.791357 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hm599" event={"ID":"a5e5ddbf-b676-44a1-996e-a6aafe2280e5","Type":"ContainerStarted","Data":"d43e229a28663b1c0b272376a2cd80cc5b23a03ee80a4ee7023c4acf2fd9cc3d"} Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.794655 4689 generic.go:334] "Generic (PLEG): container finished" podID="c1455952-eacb-400c-bb65-d9d6ca95a674" containerID="390a1755d4c44f561a50ea749baa8e36f0681bd1a5e7f51c9c5511eddbf23124" exitCode=0 Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.794707 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-b6958" event={"ID":"c1455952-eacb-400c-bb65-d9d6ca95a674","Type":"ContainerDied","Data":"390a1755d4c44f561a50ea749baa8e36f0681bd1a5e7f51c9c5511eddbf23124"} Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.813232 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" event={"ID":"407dd7c8-3c4f-4345-a26e-29f59646bf5e","Type":"ContainerDied","Data":"2aeaf67fc3e330c740bb7f14d654be28e557b8d35aa3645a07bc0edc5e43bac8"} Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.813296 4689 scope.go:117] "RemoveContainer" containerID="ffb58886e789bcb1fe7c5aadfffcb241d0474c7187b7c9defa41caf9384dbea4" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.813358 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.816126 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-hm599" podStartSLOduration=5.587505323 podStartE2EDuration="30.816115182s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="2026-01-23 11:11:57.896068421 +0000 UTC m=+1382.520748280" lastFinishedPulling="2026-01-23 11:12:23.12467828 +0000 UTC m=+1407.749358139" observedRunningTime="2026-01-23 11:12:25.812411497 +0000 UTC m=+1410.437091356" watchObservedRunningTime="2026-01-23 11:12:25.816115182 +0000 UTC m=+1410.440795041" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.822648 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5tlkt" event={"ID":"30a7d451-e70f-43cd-ae81-e5ccbdcb53f3","Type":"ContainerDied","Data":"4db5b1c2f033f9a25b1a9eb343e730ba9430834bc567bda3abb934a9820677a6"} Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.822730 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4db5b1c2f033f9a25b1a9eb343e730ba9430834bc567bda3abb934a9820677a6" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.822679 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5tlkt" Jan 23 11:12:25 crc kubenswrapper[4689]: E0123 11:12:25.824524 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-cn9tn" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.846455 4689 scope.go:117] "RemoveContainer" containerID="08ac6691028a298f51f6c0c1b8fa5123b4863d8f0c6476f72c42d29a12f697fa" Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.860636 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:12:25 crc kubenswrapper[4689]: I0123 11:12:25.875266 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-whrz5"] Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.403669 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-whrz5" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.512613 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:26 crc kubenswrapper[4689]: E0123 11:12:26.520655 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.520694 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" Jan 23 11:12:26 crc kubenswrapper[4689]: E0123 11:12:26.520710 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="init" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.520716 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="init" Jan 23 11:12:26 crc 
kubenswrapper[4689]: E0123 11:12:26.520740 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" containerName="glance-db-sync" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.520748 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" containerName="glance-db-sync" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.520947 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" containerName="glance-db-sync" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.520958 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" containerName="dnsmasq-dns" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.522037 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.556361 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649251 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svbqx\" (UniqueName: \"kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649307 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649339 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649479 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649522 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.649545 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " 
pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.750987 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.754177 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.755767 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.757114 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.756801 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.758124 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svbqx\" (UniqueName: \"kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.758338 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.758454 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.760391 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 
11:12:26.761308 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.761971 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.796692 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svbqx\" (UniqueName: \"kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx\") pod \"dnsmasq-dns-57c957c4ff-xdqhz\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.845412 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.874409 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nxsm" event={"ID":"a8604732-adb6-4e50-b9a4-107ebb88d8a4","Type":"ContainerStarted","Data":"144c9a3dc48192981a75f2216df78a5a958e3ae3631dac62066239a49a785d73"} Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.908137 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerStarted","Data":"2b27ce299afe74cbb7fc99210c5d42e451dff1e01d81ea3cfcab106c00fb4038"} Jan 23 11:12:26 crc kubenswrapper[4689]: I0123 11:12:26.915827 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8nxsm" podStartSLOduration=13.915807266 podStartE2EDuration="13.915807266s" podCreationTimestamp="2026-01-23 11:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:26.898953384 +0000 UTC m=+1411.523633243" watchObservedRunningTime="2026-01-23 11:12:26.915807266 +0000 UTC m=+1411.540487125" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.422732 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-b6958" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.533520 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:27 crc kubenswrapper[4689]: W0123 11:12:27.536577 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cc34682_728e_4820_a460_c8f0e54807ee.slice/crio-616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87 WatchSource:0}: Error finding container 616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87: Status 404 returned error can't find the container with id 616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87 Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.555755 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:27 crc kubenswrapper[4689]: E0123 11:12:27.556317 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1455952-eacb-400c-bb65-d9d6ca95a674" containerName="neutron-db-sync" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.556337 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1455952-eacb-400c-bb65-d9d6ca95a674" containerName="neutron-db-sync" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.556576 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1455952-eacb-400c-bb65-d9d6ca95a674" containerName="neutron-db-sync" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.557614 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.560037 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.560359 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.564708 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pcxx6" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.581680 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config\") pod \"c1455952-eacb-400c-bb65-d9d6ca95a674\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.585717 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8fh4\" (UniqueName: \"kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4\") pod \"c1455952-eacb-400c-bb65-d9d6ca95a674\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.586006 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle\") pod \"c1455952-eacb-400c-bb65-d9d6ca95a674\" (UID: \"c1455952-eacb-400c-bb65-d9d6ca95a674\") " Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.604539 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4" (OuterVolumeSpecName: 
"kube-api-access-p8fh4") pod "c1455952-eacb-400c-bb65-d9d6ca95a674" (UID: "c1455952-eacb-400c-bb65-d9d6ca95a674"). InnerVolumeSpecName "kube-api-access-p8fh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.607222 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.619341 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config" (OuterVolumeSpecName: "config") pod "c1455952-eacb-400c-bb65-d9d6ca95a674" (UID: "c1455952-eacb-400c-bb65-d9d6ca95a674"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.621310 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1455952-eacb-400c-bb65-d9d6ca95a674" (UID: "c1455952-eacb-400c-bb65-d9d6ca95a674"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.670189 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="407dd7c8-3c4f-4345-a26e-29f59646bf5e" path="/var/lib/kubelet/pods/407dd7c8-3c4f-4345-a26e-29f59646bf5e/volumes" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688450 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688559 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688673 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688704 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688726 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 
11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688753 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688768 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzvr8\" (UniqueName: \"kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688818 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8fh4\" (UniqueName: \"kubernetes.io/projected/c1455952-eacb-400c-bb65-d9d6ca95a674-kube-api-access-p8fh4\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688829 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.688840 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c1455952-eacb-400c-bb65-d9d6ca95a674-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.746141 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.747984 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.750856 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.774341 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790451 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790596 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790653 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790684 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790722 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790745 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzvr8\" (UniqueName: \"kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.790868 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.792612 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc 
kubenswrapper[4689]: I0123 11:12:27.793891 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.795751 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.795786 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0138e8ab527b9277b6df488760b5dddec306b1306b4e1e9b3fbb986ed4f86327/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.796754 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.798328 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.799324 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.811829 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzvr8\" (UniqueName: \"kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.861437 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.881254 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.892940 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893013 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893058 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893135 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893188 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893242 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.893273 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnc66\" (UniqueName: \"kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:27 crc kubenswrapper[4689]: I0123 11:12:27.974839 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bvhc5" event={"ID":"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4","Type":"ContainerStarted","Data":"1ba8e049bf2430cda7670a46927a921dc626316e0c06256c239ae8673054f611"} Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:27.996802 4689 generic.go:334] "Generic (PLEG): container finished" podID="8cc34682-728e-4820-a460-c8f0e54807ee" containerID="9342cf75540fc7ffadca75540e3ab71848e13baad1430f07df05bfdb7b67a415" exitCode=0 Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 
11:12:27.996916 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" event={"ID":"8cc34682-728e-4820-a460-c8f0e54807ee","Type":"ContainerDied","Data":"9342cf75540fc7ffadca75540e3ab71848e13baad1430f07df05bfdb7b67a415"} Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:27.996943 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" event={"ID":"8cc34682-728e-4820-a460-c8f0e54807ee","Type":"ContainerStarted","Data":"616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87"} Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.005903 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006031 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006098 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006223 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006295 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006375 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.006417 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnc66\" (UniqueName: \"kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.008800 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.017444 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.019335 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.021436 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.049850 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-b6958" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.050298 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-b6958" event={"ID":"c1455952-eacb-400c-bb65-d9d6ca95a674","Type":"ContainerDied","Data":"0d382be03615399c2ccb8d7d847deeae7615701e18b92a45e1b9c728a5e30195"} Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.050347 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d382be03615399c2ccb8d7d847deeae7615701e18b92a45e1b9c728a5e30195" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.054517 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.056042 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.056094 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/13eb8beee2c91a18127e0c8709aa1780f04c8bb652adcd05937e8ea03b9a58c8/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.112673 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnc66\" (UniqueName: \"kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.174091 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.202013 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-bvhc5" podStartSLOduration=3.585737775 podStartE2EDuration="33.201990191s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="2026-01-23 11:11:57.507134028 +0000 UTC m=+1382.131813887" lastFinishedPulling="2026-01-23 11:12:27.123386444 +0000 UTC m=+1411.748066303" observedRunningTime="2026-01-23 11:12:28.077530688 +0000 UTC m=+1412.702210547" watchObservedRunningTime="2026-01-23 11:12:28.201990191 +0000 UTC m=+1412.826670050" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.273489 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.306582 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.308564 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.319825 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.343246 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.345069 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.347678 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h7trg" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.347733 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.347936 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.349819 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.355392 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.379832 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436029 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436086 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436112 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436183 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436219 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436245 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436284 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436300 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436326 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx8hs\" (UniqueName: \"kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436369 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqk5n\" (UniqueName: \"kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.436399 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539541 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539612 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539658 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539740 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 
11:12:28.539794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539838 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539903 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539949 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.539994 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx8hs\" (UniqueName: \"kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.540059 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqk5n\" (UniqueName: \"kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.540093 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.541398 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.542138 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.544891 4689 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.546345 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.547037 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.547247 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.549643 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.550827 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.560238 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.582358 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx8hs\" (UniqueName: \"kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs\") pod \"dnsmasq-dns-5ccc5c4795-lxq4w\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.582889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqk5n\" (UniqueName: \"kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n\") pod \"neutron-5fdbbd7548-jpgxk\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:28 crc kubenswrapper[4689]: E0123 11:12:28.597541 4689 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 23 11:12:28 crc kubenswrapper[4689]: rpc error: code = Unknown desc = container create failed: mount 
`/var/lib/kubelet/pods/8cc34682-728e-4820-a460-c8f0e54807ee/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 23 11:12:28 crc kubenswrapper[4689]: > podSandboxID="616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87" Jan 23 11:12:28 crc kubenswrapper[4689]: E0123 11:12:28.598086 4689 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 11:12:28 crc kubenswrapper[4689]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8ch5f4h548h647h5fdh585hb9h5dfh98h69h75h6bhcchcdh67h549h59fh687h88h59chddh85h55bh5b6h5f9h5b6h64dh669h657h5cfh558h688q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-svbqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod dnsmasq-dns-57c957c4ff-xdqhz_openstack(8cc34682-728e-4820-a460-c8f0e54807ee): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/8cc34682-728e-4820-a460-c8f0e54807ee/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 23 11:12:28 crc kubenswrapper[4689]: > logger="UnhandledError" Jan 23 11:12:28 crc kubenswrapper[4689]: E0123 11:12:28.599596 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/8cc34682-728e-4820-a460-c8f0e54807ee/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" podUID="8cc34682-728e-4820-a460-c8f0e54807ee" Jan 23 11:12:28 crc kubenswrapper[4689]: W0123 11:12:28.719134 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e6a9707_016e_4322_b6ea_ce7dda941021.slice/crio-d8471837d6a643c9878f6f6eed28aeaade87b3065df33c2232764b939c2480c6 WatchSource:0}: Error finding container d8471837d6a643c9878f6f6eed28aeaade87b3065df33c2232764b939c2480c6: Status 404 returned error can't find the container with id d8471837d6a643c9878f6f6eed28aeaade87b3065df33c2232764b939c2480c6 Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.723712 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.740664 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:28 crc kubenswrapper[4689]: I0123 11:12:28.750814 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.101916 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerStarted","Data":"d8471837d6a643c9878f6f6eed28aeaade87b3065df33c2232764b939c2480c6"} Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.108620 4689 generic.go:334] "Generic (PLEG): container finished" podID="a5e5ddbf-b676-44a1-996e-a6aafe2280e5" containerID="d43e229a28663b1c0b272376a2cd80cc5b23a03ee80a4ee7023c4acf2fd9cc3d" exitCode=0 Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.108847 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hm599" event={"ID":"a5e5ddbf-b676-44a1-996e-a6aafe2280e5","Type":"ContainerDied","Data":"d43e229a28663b1c0b272376a2cd80cc5b23a03ee80a4ee7023c4acf2fd9cc3d"} Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.150565 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.493798 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.531051 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.677934 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.717369 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.729316 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.880092 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.880614 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.880705 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svbqx\" (UniqueName: \"kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.880926 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.881106 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.881193 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.914035 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx" (OuterVolumeSpecName: "kube-api-access-svbqx") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "kube-api-access-svbqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:29 crc kubenswrapper[4689]: I0123 11:12:29.984397 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svbqx\" (UniqueName: \"kubernetes.io/projected/8cc34682-728e-4820-a460-c8f0e54807ee-kube-api-access-svbqx\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.133538 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.141806 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerStarted","Data":"ee6d399a907fc24fd0863916a63edd9899fa21170c146db37fd536a9a063d1bf"} Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.154997 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerStarted","Data":"0058ef1f42fc1f29f7df96b88f31189ae7bbc0cac96875f1a3fcfb593a580c34"} Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.163300 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.164326 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerStarted","Data":"ce07c69095125ef6813bc798235451cc83c64a5004caff8263178775d7765665"} Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.172680 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.175963 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" event={"ID":"8cc34682-728e-4820-a460-c8f0e54807ee","Type":"ContainerDied","Data":"616838384aadfd83e80884c96b2af5cdaf17d7e41c7681f81819c350dd394b87"} Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.176035 4689 scope.go:117] "RemoveContainer" containerID="9342cf75540fc7ffadca75540e3ab71848e13baad1430f07df05bfdb7b67a415" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.176058 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-xdqhz" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.186723 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" event={"ID":"980459f0-f118-4e49-9ee4-a05ec5876c2c","Type":"ContainerStarted","Data":"a81009394aeda1c5845e2a7b680baceaf3d373b141aaaed0415c2fc3f1922ab4"} Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.190853 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config" (OuterVolumeSpecName: "config") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.192719 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") pod \"8cc34682-728e-4820-a460-c8f0e54807ee\" (UID: \"8cc34682-728e-4820-a460-c8f0e54807ee\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.193008 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: W0123 11:12:30.193183 4689 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8cc34682-728e-4820-a460-c8f0e54807ee/volumes/kubernetes.io~configmap/config Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.193205 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config" (OuterVolumeSpecName: "config") pod "8cc34682-728e-4820-a460-c8f0e54807ee" (UID: "8cc34682-728e-4820-a460-c8f0e54807ee"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.195534 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.195586 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.195601 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.195614 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.195671 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8cc34682-728e-4820-a460-c8f0e54807ee-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.681553 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.693951 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-xdqhz"] Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.882582 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-hm599" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.938524 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nds7\" (UniqueName: \"kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7\") pod \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.939052 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle\") pod \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.939313 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data\") pod \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.939370 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs\") pod \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.939408 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts\") pod \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\" (UID: \"a5e5ddbf-b676-44a1-996e-a6aafe2280e5\") " Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.943747 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs" (OuterVolumeSpecName: "logs") pod "a5e5ddbf-b676-44a1-996e-a6aafe2280e5" (UID: "a5e5ddbf-b676-44a1-996e-a6aafe2280e5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.950340 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts" (OuterVolumeSpecName: "scripts") pod "a5e5ddbf-b676-44a1-996e-a6aafe2280e5" (UID: "a5e5ddbf-b676-44a1-996e-a6aafe2280e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.950658 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7" (OuterVolumeSpecName: "kube-api-access-6nds7") pod "a5e5ddbf-b676-44a1-996e-a6aafe2280e5" (UID: "a5e5ddbf-b676-44a1-996e-a6aafe2280e5"). InnerVolumeSpecName "kube-api-access-6nds7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:30 crc kubenswrapper[4689]: I0123 11:12:30.978257 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data" (OuterVolumeSpecName: "config-data") pod "a5e5ddbf-b676-44a1-996e-a6aafe2280e5" (UID: "a5e5ddbf-b676-44a1-996e-a6aafe2280e5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.026927 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5e5ddbf-b676-44a1-996e-a6aafe2280e5" (UID: "a5e5ddbf-b676-44a1-996e-a6aafe2280e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.051574 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.051612 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nds7\" (UniqueName: \"kubernetes.io/projected/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-kube-api-access-6nds7\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.051623 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.051633 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.051644 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5e5ddbf-b676-44a1-996e-a6aafe2280e5-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.202282 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerStarted","Data":"b0fd81ce1f3137d9fefec80dd7232b4881694ab03f9745191007e1ff8c81b81a"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.207763 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-hm599" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.207762 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hm599" event={"ID":"a5e5ddbf-b676-44a1-996e-a6aafe2280e5","Type":"ContainerDied","Data":"39cafe709aa278dc40bb68f17fa6f983509b5b09a1eb738254032faa1ba12a29"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.207986 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39cafe709aa278dc40bb68f17fa6f983509b5b09a1eb738254032faa1ba12a29" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.212439 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerStarted","Data":"fa2d601b21d2ee1b57a47a47dcd543ffe7bf1592de35d3457dde31db80efdab0"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.212481 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerStarted","Data":"84571097cab79784f9cd491a59409d46cd57299976df5353d6252a8632cee3d2"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.212725 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.215258 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerStarted","Data":"c7f3c3124f2d03830cb9ed2c51f07b16eed3f49c3b1a05c7644aafafeaa069e0"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.215413 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-log" containerID="cri-o://ce07c69095125ef6813bc798235451cc83c64a5004caff8263178775d7765665" gracePeriod=30 Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.215478 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-httpd" containerID="cri-o://c7f3c3124f2d03830cb9ed2c51f07b16eed3f49c3b1a05c7644aafafeaa069e0" gracePeriod=30 Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.228901 4689 generic.go:334] "Generic (PLEG): container finished" podID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerID="97f054985c008fc2d05deea0e28fe31a4763fd73a3c97a4ca0d6b90c1bc747f1" exitCode=0 Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.228934 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" event={"ID":"980459f0-f118-4e49-9ee4-a05ec5876c2c","Type":"ContainerDied","Data":"97f054985c008fc2d05deea0e28fe31a4763fd73a3c97a4ca0d6b90c1bc747f1"} Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.243891 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5fdbbd7548-jpgxk" podStartSLOduration=3.24386706 podStartE2EDuration="3.24386706s" podCreationTimestamp="2026-01-23 11:12:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:31.237115157 +0000 UTC m=+1415.861795016" watchObservedRunningTime="2026-01-23 11:12:31.24386706 +0000 UTC m=+1415.868546909" 
Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.270209 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-68cfbc8d8-tc5p4"] Jan 23 11:12:31 crc kubenswrapper[4689]: E0123 11:12:31.270892 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5e5ddbf-b676-44a1-996e-a6aafe2280e5" containerName="placement-db-sync" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.270921 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5e5ddbf-b676-44a1-996e-a6aafe2280e5" containerName="placement-db-sync" Jan 23 11:12:31 crc kubenswrapper[4689]: E0123 11:12:31.270956 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cc34682-728e-4820-a460-c8f0e54807ee" containerName="init" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.270966 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cc34682-728e-4820-a460-c8f0e54807ee" containerName="init" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.271260 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5e5ddbf-b676-44a1-996e-a6aafe2280e5" containerName="placement-db-sync" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.271305 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cc34682-728e-4820-a460-c8f0e54807ee" containerName="init" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.274394 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.277306 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-fgc9f" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.277658 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.277938 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.278130 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.278218 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.301178 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-68cfbc8d8-tc5p4"] Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.349957 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.349942687 podStartE2EDuration="5.349942687s" podCreationTimestamp="2026-01-23 11:12:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:31.341487505 +0000 UTC m=+1415.966167364" watchObservedRunningTime="2026-01-23 11:12:31.349942687 +0000 UTC m=+1415.974622546" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363357 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-internal-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 
11:12:31.363468 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-config-data\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363526 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bnln\" (UniqueName: \"kubernetes.io/projected/5b421cc4-175a-484f-a454-3c38db90b6c5-kube-api-access-7bnln\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363557 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-combined-ca-bundle\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363600 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-scripts\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363694 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b421cc4-175a-484f-a454-3c38db90b6c5-logs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.363771 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-public-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.465660 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-scripts\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.465766 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b421cc4-175a-484f-a454-3c38db90b6c5-logs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.465846 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-public-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.465941 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-internal-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.465994 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-config-data\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.466035 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bnln\" (UniqueName: \"kubernetes.io/projected/5b421cc4-175a-484f-a454-3c38db90b6c5-kube-api-access-7bnln\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.466056 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-combined-ca-bundle\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.470092 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-scripts\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.470452 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b421cc4-175a-484f-a454-3c38db90b6c5-logs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.471804 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-internal-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.473077 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-combined-ca-bundle\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.474057 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-public-tls-certs\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.480590 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b421cc4-175a-484f-a454-3c38db90b6c5-config-data\") pod 
\"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.500905 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bnln\" (UniqueName: \"kubernetes.io/projected/5b421cc4-175a-484f-a454-3c38db90b6c5-kube-api-access-7bnln\") pod \"placement-68cfbc8d8-tc5p4\" (UID: \"5b421cc4-175a-484f-a454-3c38db90b6c5\") " pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.655088 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cc34682-728e-4820-a460-c8f0e54807ee" path="/var/lib/kubelet/pods/8cc34682-728e-4820-a460-c8f0e54807ee/volumes" Jan 23 11:12:31 crc kubenswrapper[4689]: I0123 11:12:31.730166 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.265875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" event={"ID":"980459f0-f118-4e49-9ee4-a05ec5876c2c","Type":"ContainerStarted","Data":"c7c30db92bdc875cce4aefa0694b1272c32306535a4d8a68b4bc32de3bcb92c5"} Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.266201 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.268036 4689 generic.go:334] "Generic (PLEG): container finished" podID="a8604732-adb6-4e50-b9a4-107ebb88d8a4" containerID="144c9a3dc48192981a75f2216df78a5a958e3ae3631dac62066239a49a785d73" exitCode=0 Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.268114 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nxsm" event={"ID":"a8604732-adb6-4e50-b9a4-107ebb88d8a4","Type":"ContainerDied","Data":"144c9a3dc48192981a75f2216df78a5a958e3ae3631dac62066239a49a785d73"} Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.270037 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerStarted","Data":"8c0cd05ab8e4510c97cce55bbfd65475091d68cd4699957d837e72940f27a119"} Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.270195 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-log" containerID="cri-o://b0fd81ce1f3137d9fefec80dd7232b4881694ab03f9745191007e1ff8c81b81a" gracePeriod=30 Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.270538 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-httpd" containerID="cri-o://8c0cd05ab8e4510c97cce55bbfd65475091d68cd4699957d837e72940f27a119" gracePeriod=30 Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.283940 4689 generic.go:334] "Generic (PLEG): container finished" podID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerID="c7f3c3124f2d03830cb9ed2c51f07b16eed3f49c3b1a05c7644aafafeaa069e0" exitCode=143 Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.283974 4689 generic.go:334] "Generic (PLEG): container finished" podID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerID="ce07c69095125ef6813bc798235451cc83c64a5004caff8263178775d7765665" exitCode=143 Jan 
23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.284004 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerDied","Data":"c7f3c3124f2d03830cb9ed2c51f07b16eed3f49c3b1a05c7644aafafeaa069e0"} Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.284059 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerDied","Data":"ce07c69095125ef6813bc798235451cc83c64a5004caff8263178775d7765665"} Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.304485 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" podStartSLOduration=4.30446369 podStartE2EDuration="4.30446369s" podCreationTimestamp="2026-01-23 11:12:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:32.303949227 +0000 UTC m=+1416.928629106" watchObservedRunningTime="2026-01-23 11:12:32.30446369 +0000 UTC m=+1416.929143549" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.382297 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-68cfbc8d8-tc5p4"] Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.392910 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.392886995 podStartE2EDuration="6.392886995s" podCreationTimestamp="2026-01-23 11:12:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:32.327370509 +0000 UTC m=+1416.952050368" watchObservedRunningTime="2026-01-23 11:12:32.392886995 +0000 UTC m=+1417.017566864" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.556028 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.682060 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:12:32 crc kubenswrapper[4689]: E0123 11:12:32.683306 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-log" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.683372 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-log" Jan 23 11:12:32 crc kubenswrapper[4689]: E0123 11:12:32.683426 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-httpd" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.683438 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-httpd" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.683953 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-log" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.683985 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" containerName="glance-httpd" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.691953 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.697424 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.703432 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.716783 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.716855 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.716970 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.717042 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.717114 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run\") pod 
\"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.717179 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.717278 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzvr8\" (UniqueName: \"kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8\") pod \"5e6a9707-016e-4322-b6ea-ce7dda941021\" (UID: \"5e6a9707-016e-4322-b6ea-ce7dda941021\") " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.717909 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.718333 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.720235 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs" (OuterVolumeSpecName: "logs") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.727503 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts" (OuterVolumeSpecName: "scripts") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.729614 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.730453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8" (OuterVolumeSpecName: "kube-api-access-lzvr8") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "kube-api-access-lzvr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.739599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20" (OuterVolumeSpecName: "glance") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.754167 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.816127 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data" (OuterVolumeSpecName: "config-data") pod "5e6a9707-016e-4322-b6ea-ce7dda941021" (UID: "5e6a9707-016e-4322-b6ea-ce7dda941021"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820381 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820429 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820553 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbw2x\" (UniqueName: \"kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820670 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820765 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.820805 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821128 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821351 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821374 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzvr8\" (UniqueName: \"kubernetes.io/projected/5e6a9707-016e-4322-b6ea-ce7dda941021-kube-api-access-lzvr8\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821404 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821416 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e6a9707-016e-4322-b6ea-ce7dda941021-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821451 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") on node \"crc\" " Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.821469 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e6a9707-016e-4322-b6ea-ce7dda941021-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.848030 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.848220 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20") on node "crc" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923733 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923816 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923841 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbw2x\" (UniqueName: \"kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923930 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.923951 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.924046 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.929353 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config\") pod \"neutron-87585974f-vcczk\" (UID: 
\"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.929471 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.929481 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.929876 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.936815 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.937255 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:32 crc kubenswrapper[4689]: I0123 11:12:32.953914 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbw2x\" (UniqueName: \"kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x\") pod \"neutron-87585974f-vcczk\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.034770 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.303190 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68cfbc8d8-tc5p4" event={"ID":"5b421cc4-175a-484f-a454-3c38db90b6c5","Type":"ContainerStarted","Data":"28ee2b476157a33b26f53b214620a2fecdeb8cab5264e6c22d9f5b11ab5aa973"} Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.303506 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68cfbc8d8-tc5p4" event={"ID":"5b421cc4-175a-484f-a454-3c38db90b6c5","Type":"ContainerStarted","Data":"7d4f28db5acde72a671e48084ef2e0c1a6f8eb4c940b079f5ec45706a93fa104"} Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.305316 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e6a9707-016e-4322-b6ea-ce7dda941021","Type":"ContainerDied","Data":"d8471837d6a643c9878f6f6eed28aeaade87b3065df33c2232764b939c2480c6"} Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.305346 4689 scope.go:117] "RemoveContainer" containerID="c7f3c3124f2d03830cb9ed2c51f07b16eed3f49c3b1a05c7644aafafeaa069e0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.305477 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.314567 4689 generic.go:334] "Generic (PLEG): container finished" podID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerID="8c0cd05ab8e4510c97cce55bbfd65475091d68cd4699957d837e72940f27a119" exitCode=0 Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.314612 4689 generic.go:334] "Generic (PLEG): container finished" podID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerID="b0fd81ce1f3137d9fefec80dd7232b4881694ab03f9745191007e1ff8c81b81a" exitCode=143 Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.314628 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerDied","Data":"8c0cd05ab8e4510c97cce55bbfd65475091d68cd4699957d837e72940f27a119"} Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.314667 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerDied","Data":"b0fd81ce1f3137d9fefec80dd7232b4881694ab03f9745191007e1ff8c81b81a"} Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.365613 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.390405 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.415869 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.423995 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.426986 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.427041 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.433408 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.545822 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.545873 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.545898 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.546003 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knpn9\" (UniqueName: \"kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.546030 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.546069 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.546087 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.546121 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.602045 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.648870 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.649855 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.649898 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.650183 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.650214 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knpn9\" (UniqueName: \"kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.650453 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.650589 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.650660 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs\") pod \"glance-default-external-api-0\" (UID: 
\"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.651286 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.655107 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.662685 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.662921 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.664169 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.665679 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.665743 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0138e8ab527b9277b6df488760b5dddec306b1306b4e1e9b3fbb986ed4f86327/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.671315 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.689970 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knpn9\" (UniqueName: \"kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.690813 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e6a9707-016e-4322-b6ea-ce7dda941021" path="/var/lib/kubelet/pods/5e6a9707-016e-4322-b6ea-ce7dda941021/volumes" Jan 23 11:12:33 crc kubenswrapper[4689]: I0123 11:12:33.789862 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " pod="openstack/glance-default-external-api-0" Jan 23 11:12:34 crc kubenswrapper[4689]: I0123 11:12:34.044937 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:12:35 crc kubenswrapper[4689]: I0123 11:12:35.341059 4689 generic.go:334] "Generic (PLEG): container finished" podID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" containerID="1ba8e049bf2430cda7670a46927a921dc626316e0c06256c239ae8673054f611" exitCode=0 Jan 23 11:12:35 crc kubenswrapper[4689]: I0123 11:12:35.341186 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bvhc5" event={"ID":"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4","Type":"ContainerDied","Data":"1ba8e049bf2430cda7670a46927a921dc626316e0c06256c239ae8673054f611"} Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.402848 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7","Type":"ContainerDied","Data":"ee6d399a907fc24fd0863916a63edd9899fa21170c146db37fd536a9a063d1bf"} Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.404132 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee6d399a907fc24fd0863916a63edd9899fa21170c146db37fd536a9a063d1bf" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.405122 4689 scope.go:117] "RemoveContainer" containerID="ce07c69095125ef6813bc798235451cc83c64a5004caff8263178775d7765665" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.601523 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.733398 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bvhc5" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.742391 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.742526 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.768664 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnc66\" (UniqueName: \"kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.768704 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.768848 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.768894 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.768960 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.769014 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.769048 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data\") pod \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\" (UID: \"87eb55ca-7f99-4df8-b1e3-7f78f654d1a7\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.769436 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs" (OuterVolumeSpecName: "logs") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.769725 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.770211 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.770230 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.776371 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66" (OuterVolumeSpecName: "kube-api-access-hnc66") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "kube-api-access-hnc66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.776485 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts" (OuterVolumeSpecName: "scripts") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.819825 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44" (OuterVolumeSpecName: "glance") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.857170 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.858136 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.858443 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="dnsmasq-dns" containerID="cri-o://5b1d89085b6b85019773ef11b65d013490b50db90cd1df1df31f86d6996dd269" gracePeriod=10 Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872464 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872508 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872568 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872655 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872756 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spk7c\" (UniqueName: \"kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c\") pod \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872831 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2n55\" (UniqueName: \"kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872850 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data\") pod \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872890 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle\") pod \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\" (UID: \"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.872947 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys\") pod \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\" (UID: \"a8604732-adb6-4e50-b9a4-107ebb88d8a4\") " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.873481 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnc66\" (UniqueName: \"kubernetes.io/projected/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-kube-api-access-hnc66\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.873494 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.873515 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") on node \"crc\" " Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.873525 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.882367 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.896096 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.905479 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55" (OuterVolumeSpecName: "kube-api-access-t2n55") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "kube-api-access-t2n55". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.918760 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data" (OuterVolumeSpecName: "config-data") pod "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" (UID: "87eb55ca-7f99-4df8-b1e3-7f78f654d1a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.918789 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c" (OuterVolumeSpecName: "kube-api-access-spk7c") pod "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" (UID: "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4"). InnerVolumeSpecName "kube-api-access-spk7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.918790 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts" (OuterVolumeSpecName: "scripts") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.934803 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" (UID: "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.940137 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.940415 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44") on node "crc" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975643 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975678 4689 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975693 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975704 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spk7c\" (UniqueName: \"kubernetes.io/projected/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-kube-api-access-spk7c\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975718 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2n55\" (UniqueName: \"kubernetes.io/projected/a8604732-adb6-4e50-b9a4-107ebb88d8a4-kube-api-access-t2n55\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975730 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975743 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.975756 4689 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:38 crc kubenswrapper[4689]: I0123 11:12:38.984657 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data" (OuterVolumeSpecName: "config-data") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.014013 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8604732-adb6-4e50-b9a4-107ebb88d8a4" (UID: "a8604732-adb6-4e50-b9a4-107ebb88d8a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.077215 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:12:39 crc kubenswrapper[4689]: W0123 11:12:39.077659 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod053d67d2_ab83_4be2_8de7_0cd894da7a5b.slice/crio-b91929c51be4f94c664e8eeeb930cfd1fcf730e0fcbf1dce7c9bb45ce7f2c937 WatchSource:0}: Error finding container b91929c51be4f94c664e8eeeb930cfd1fcf730e0fcbf1dce7c9bb45ce7f2c937: Status 404 returned error can't find the container with id b91929c51be4f94c664e8eeeb930cfd1fcf730e0fcbf1dce7c9bb45ce7f2c937 Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.077695 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.077719 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8604732-adb6-4e50-b9a4-107ebb88d8a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.107614 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data" (OuterVolumeSpecName: "config-data") pod "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" (UID: "47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.179771 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.480443 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerStarted","Data":"23d224f521e4a9c3ca21f5152e73e05bc3475409625890acc9fb20d80195562d"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.537206 4689 generic.go:334] "Generic (PLEG): container finished" podID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerID="5b1d89085b6b85019773ef11b65d013490b50db90cd1df1df31f86d6996dd269" exitCode=0 Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.537838 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" event={"ID":"22d13f4a-d514-4739-b9f5-bcb6107dd167","Type":"ContainerDied","Data":"5b1d89085b6b85019773ef11b65d013490b50db90cd1df1df31f86d6996dd269"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.549947 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerStarted","Data":"b91929c51be4f94c664e8eeeb930cfd1fcf730e0fcbf1dce7c9bb45ce7f2c937"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.574986 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerStarted","Data":"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.575076 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerStarted","Data":"3b009eaafc2d98626f7f6ff498e47e0f4018b08c813137b96f17e14bcefa6ffc"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.577420 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cn9tn" event={"ID":"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7","Type":"ContainerStarted","Data":"08429da34f62eda07b0b08f13139d9936a2dea2067c362ef1289bc04d2a9ae36"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.588758 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8nxsm" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.588759 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nxsm" event={"ID":"a8604732-adb6-4e50-b9a4-107ebb88d8a4","Type":"ContainerDied","Data":"6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.589322 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6dcf686040e93816074c79630e9aa2dfb5d2656d619263441159f1b15a9d354a" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.597565 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68cfbc8d8-tc5p4" event={"ID":"5b421cc4-175a-484f-a454-3c38db90b6c5","Type":"ContainerStarted","Data":"bc47bc9c20f1c8b4e0c1ab0ac91181ff1dca8dc00673d9ec418db4018a85c77e"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.598921 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.598948 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.603526 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.604477 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bvhc5" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.604760 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bvhc5" event={"ID":"47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4","Type":"ContainerDied","Data":"b1097d8c29abc6ca4cf5302a3741fbd676541e0275f73f48ba198076edcb83cf"} Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.604793 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1097d8c29abc6ca4cf5302a3741fbd676541e0275f73f48ba198076edcb83cf" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.610653 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-cn9tn" podStartSLOduration=3.527819251 podStartE2EDuration="44.61063864s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="2026-01-23 11:11:57.768451236 +0000 UTC m=+1382.393131095" lastFinishedPulling="2026-01-23 11:12:38.851270625 +0000 UTC m=+1423.475950484" observedRunningTime="2026-01-23 11:12:39.6062143 +0000 UTC m=+1424.230894169" watchObservedRunningTime="2026-01-23 11:12:39.61063864 +0000 UTC m=+1424.235318499" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.639542 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-68cfbc8d8-tc5p4" podStartSLOduration=8.639521486 podStartE2EDuration="8.639521486s" podCreationTimestamp="2026-01-23 11:12:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:39.631645416 +0000 UTC m=+1424.256325275" watchObservedRunningTime="2026-01-23 11:12:39.639521486 +0000 UTC m=+1424.264201345" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.706521 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.735568 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.767095 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780275 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780791 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="dnsmasq-dns" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780807 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="dnsmasq-dns" Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780829 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-httpd" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780836 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-httpd" Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780848 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" containerName="heat-db-sync" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780870 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" containerName="heat-db-sync" Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780895 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-log" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780901 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-log" Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780910 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="init" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780917 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="init" Jan 23 11:12:39 crc kubenswrapper[4689]: E0123 11:12:39.780953 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8604732-adb6-4e50-b9a4-107ebb88d8a4" containerName="keystone-bootstrap" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.780960 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8604732-adb6-4e50-b9a4-107ebb88d8a4" containerName="keystone-bootstrap" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.781199 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" containerName="heat-db-sync" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.781216 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-httpd" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.781252 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" containerName="glance-log" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 
11:12:39.781268 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8604732-adb6-4e50-b9a4-107ebb88d8a4" containerName="keystone-bootstrap" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.781276 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" containerName="dnsmasq-dns" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.782539 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.784438 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.784694 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834518 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834621 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq7pp\" (UniqueName: \"kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834681 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834735 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834786 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834932 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0\") pod \"22d13f4a-d514-4739-b9f5-bcb6107dd167\" (UID: \"22d13f4a-d514-4739-b9f5-bcb6107dd167\") " Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.834984 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.927755 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp" (OuterVolumeSpecName: "kube-api-access-wq7pp") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). 
InnerVolumeSpecName "kube-api-access-wq7pp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:39 crc kubenswrapper[4689]: I0123 11:12:39.946947 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq7pp\" (UniqueName: \"kubernetes.io/projected/22d13f4a-d514-4739-b9f5-bcb6107dd167-kube-api-access-wq7pp\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.010774 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-777b6f5fc9-72drb"] Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.012750 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.014003 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.016079 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.016205 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.016491 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jvx7x" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.016496 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.017029 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.035414 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-777b6f5fc9-72drb"] Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.048915 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.048968 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049009 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049057 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75xds\" (UniqueName: \"kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " 
pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049091 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049163 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049219 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.049240 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.069808 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.088053 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config" (OuterVolumeSpecName: "config") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.096630 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.114665 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.116504 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "22d13f4a-d514-4739-b9f5-bcb6107dd167" (UID: "22d13f4a-d514-4739-b9f5-bcb6107dd167"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151134 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-public-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151205 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151235 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151273 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151321 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75xds\" (UniqueName: \"kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151437 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-combined-ca-bundle\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151545 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151589 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-config-data\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151609 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzxdw\" (UniqueName: \"kubernetes.io/projected/036797f0-f940-4ef8-9b43-cc12843d2338-kube-api-access-xzxdw\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151786 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151880 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-credential-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151909 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-fernet-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151971 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-internal-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.151991 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.152021 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.152101 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-scripts\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.152505 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.152983 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.153752 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.153783 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/13eb8beee2c91a18127e0c8709aa1780f04c8bb652adcd05937e8ea03b9a58c8/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.154023 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.154054 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.154067 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.154078 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.154089 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22d13f4a-d514-4739-b9f5-bcb6107dd167-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.156361 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.156998 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.160994 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.171056 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.175924 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75xds\" (UniqueName: \"kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.223335 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256420 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-scripts\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256480 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-public-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256567 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-combined-ca-bundle\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256598 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-config-data\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256614 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzxdw\" (UniqueName: \"kubernetes.io/projected/036797f0-f940-4ef8-9b43-cc12843d2338-kube-api-access-xzxdw\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256720 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-credential-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256743 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-fernet-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.256777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-internal-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.264398 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-public-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.266363 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-combined-ca-bundle\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.266599 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-scripts\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.267249 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-config-data\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.268406 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-fernet-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.268414 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-credential-keys\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.298131 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/036797f0-f940-4ef8-9b43-cc12843d2338-internal-tls-certs\") pod \"keystone-777b6f5fc9-72drb\" (UID: 
\"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.318538 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzxdw\" (UniqueName: \"kubernetes.io/projected/036797f0-f940-4ef8-9b43-cc12843d2338-kube-api-access-xzxdw\") pod \"keystone-777b6f5fc9-72drb\" (UID: \"036797f0-f940-4ef8-9b43-cc12843d2338\") " pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.336689 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.486127 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.645356 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerStarted","Data":"081eeeb029b6dbd702c8ac6a4c6e88d6920f2af25d4372c65eeee1cf3b685ef5"} Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.651635 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerStarted","Data":"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c"} Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.652347 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-87585974f-vcczk" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.659745 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" event={"ID":"22d13f4a-d514-4739-b9f5-bcb6107dd167","Type":"ContainerDied","Data":"a552a2bb3291f1962a7d05ebcca3aa2d13ee141af183eec3a0a9196154e8af49"} Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.659793 4689 scope.go:117] "RemoveContainer" containerID="5b1d89085b6b85019773ef11b65d013490b50db90cd1df1df31f86d6996dd269" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.660023 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-6cst5" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.723930 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-87585974f-vcczk" podStartSLOduration=8.723905472 podStartE2EDuration="8.723905472s" podCreationTimestamp="2026-01-23 11:12:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:40.685228535 +0000 UTC m=+1425.309908394" watchObservedRunningTime="2026-01-23 11:12:40.723905472 +0000 UTC m=+1425.348585331" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.756460 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.776375 4689 scope.go:117] "RemoveContainer" containerID="f79513ae169b10b27112e9812f17dda5f5b7543441d510a85d6d44d0fc0a69ee" Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.781921 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-6cst5"] Jan 23 11:12:40 crc kubenswrapper[4689]: I0123 11:12:40.893994 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-777b6f5fc9-72drb"] Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.213169 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:12:41 crc kubenswrapper[4689]: W0123 11:12:41.217273 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3415e627_33c2_4457_9cea_4dbd78f4d2b3.slice/crio-62a7d57a2a99bd99c49e43a2d7f7993695f5068357abbcccbb40f00bc57e3cd0 WatchSource:0}: Error finding container 62a7d57a2a99bd99c49e43a2d7f7993695f5068357abbcccbb40f00bc57e3cd0: Status 404 returned error can't find the container with id 62a7d57a2a99bd99c49e43a2d7f7993695f5068357abbcccbb40f00bc57e3cd0 Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.656761 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22d13f4a-d514-4739-b9f5-bcb6107dd167" path="/var/lib/kubelet/pods/22d13f4a-d514-4739-b9f5-bcb6107dd167/volumes" Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.657789 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87eb55ca-7f99-4df8-b1e3-7f78f654d1a7" path="/var/lib/kubelet/pods/87eb55ca-7f99-4df8-b1e3-7f78f654d1a7/volumes" Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.693311 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerStarted","Data":"62a7d57a2a99bd99c49e43a2d7f7993695f5068357abbcccbb40f00bc57e3cd0"} Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.695311 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-777b6f5fc9-72drb" event={"ID":"036797f0-f940-4ef8-9b43-cc12843d2338","Type":"ContainerStarted","Data":"25ca86a3d3119f5f269131b0a2bce1b1eb636888644548fd36f1977e5395aaa5"} Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.696716 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:12:41 crc kubenswrapper[4689]: I0123 11:12:41.697825 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerStarted","Data":"c6478ad3e2e904d66aa12366c2d5a0e63131e29f36f036f4d6fb19aff8730df9"} Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.466711 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.712206 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerStarted","Data":"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"} Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.717228 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-777b6f5fc9-72drb" event={"ID":"036797f0-f940-4ef8-9b43-cc12843d2338","Type":"ContainerStarted","Data":"adb09bf99725db141c73a68f18f1cc506e6cf93690408d8011616a157df1167b"} Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.717327 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.812039 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-777b6f5fc9-72drb" podStartSLOduration=3.811933906 podStartE2EDuration="3.811933906s" podCreationTimestamp="2026-01-23 11:12:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:42.735950182 +0000 UTC m=+1427.360630041" watchObservedRunningTime="2026-01-23 11:12:42.811933906 +0000 UTC m=+1427.436613795" Jan 23 11:12:42 crc kubenswrapper[4689]: I0123 11:12:42.834547 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.834514348 podStartE2EDuration="9.834514348s" podCreationTimestamp="2026-01-23 11:12:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:42.782960059 +0000 UTC m=+1427.407639908" watchObservedRunningTime="2026-01-23 11:12:42.834514348 +0000 UTC m=+1427.459194207" Jan 23 11:12:43 crc kubenswrapper[4689]: I0123 11:12:43.727522 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr5lm" event={"ID":"b74dafce-64c6-4c46-886b-bdc4044b9b1e","Type":"ContainerStarted","Data":"80939d29045856874080702ae67aa434c0d30ce973cc878776ecfd85799da35b"} Jan 23 11:12:43 crc kubenswrapper[4689]: I0123 11:12:43.742071 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerStarted","Data":"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"} Jan 23 11:12:43 crc kubenswrapper[4689]: I0123 11:12:43.752360 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-lr5lm" podStartSLOduration=3.902002429 podStartE2EDuration="48.752339958s" podCreationTimestamp="2026-01-23 11:11:55 +0000 UTC" firstStartedPulling="2026-01-23 11:11:57.553407298 +0000 UTC m=+1382.178087157" lastFinishedPulling="2026-01-23 11:12:42.403744827 +0000 UTC m=+1427.028424686" observedRunningTime="2026-01-23 11:12:43.748416298 +0000 UTC m=+1428.373096157" watchObservedRunningTime="2026-01-23 11:12:43.752339958 +0000 UTC m=+1428.377019817" Jan 23 11:12:43 crc kubenswrapper[4689]: 
I0123 11:12:43.774780 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.774762356 podStartE2EDuration="4.774762356s" podCreationTimestamp="2026-01-23 11:12:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:43.767313498 +0000 UTC m=+1428.391993357" watchObservedRunningTime="2026-01-23 11:12:43.774762356 +0000 UTC m=+1428.399442215" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.046670 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.046721 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.085546 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.108716 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.758517 4689 generic.go:334] "Generic (PLEG): container finished" podID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" containerID="08429da34f62eda07b0b08f13139d9936a2dea2067c362ef1289bc04d2a9ae36" exitCode=0 Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.759444 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cn9tn" event={"ID":"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7","Type":"ContainerDied","Data":"08429da34f62eda07b0b08f13139d9936a2dea2067c362ef1289bc04d2a9ae36"} Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.760752 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 11:12:44 crc kubenswrapper[4689]: I0123 11:12:44.760789 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.875829 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"] Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.879575 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.905240 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"] Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.914845 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.915370 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:45 crc kubenswrapper[4689]: I0123 11:12:45.915486 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-922mk\" (UniqueName: \"kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.018007 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.018196 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.018282 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-922mk\" (UniqueName: \"kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.019426 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.019975 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.042559 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-922mk\" (UniqueName: \"kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk\") pod \"redhat-operators-8wtbq\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") " pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:46 crc kubenswrapper[4689]: I0123 11:12:46.220528 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.531188 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.660366 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.692434 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.706371 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfgwf\" (UniqueName: \"kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf\") pod \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.706464 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle\") pod \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.706603 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data\") pod \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\" (UID: \"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7\") " Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.716327 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" (UID: "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.716462 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf" (OuterVolumeSpecName: "kube-api-access-pfgwf") pod "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" (UID: "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7"). InnerVolumeSpecName "kube-api-access-pfgwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.775228 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" (UID: "3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.811026 4689 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.811052 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfgwf\" (UniqueName: \"kubernetes.io/projected/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-kube-api-access-pfgwf\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.811062 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.835628 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cn9tn" Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.843252 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cn9tn" event={"ID":"3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7","Type":"ContainerDied","Data":"27166c1837adbdc89c5481c475709f8ee52c3b8e7825b024b01591831f702fd0"} Jan 23 11:12:49 crc kubenswrapper[4689]: I0123 11:12:49.843319 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27166c1837adbdc89c5481c475709f8ee52c3b8e7825b024b01591831f702fd0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.487831 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.488165 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.517631 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.537593 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.772861 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-78cb64f85f-sf2tl"] Jan 23 11:12:50 crc kubenswrapper[4689]: E0123 11:12:50.773561 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" containerName="barbican-db-sync" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.773589 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" containerName="barbican-db-sync" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.773916 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" containerName="barbican-db-sync" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.775600 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.778772 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.778997 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.779099 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9sfnl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.788891 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-78cb64f85f-sf2tl"] Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.853079 4689 generic.go:334] "Generic (PLEG): container finished" podID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" containerID="80939d29045856874080702ae67aa434c0d30ce973cc878776ecfd85799da35b" exitCode=0 Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.853120 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr5lm" event={"ID":"b74dafce-64c6-4c46-886b-bdc4044b9b1e","Type":"ContainerDied","Data":"80939d29045856874080702ae67aa434c0d30ce973cc878776ecfd85799da35b"} Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.853770 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.853789 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.891317 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-8756468f4-gcf24"] Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.897560 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.907334 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.932156 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-8756468f4-gcf24"] Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.942621 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data-custom\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.942756 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7920087-af57-4092-8d74-0bcb75fc9e9d-logs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.942857 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-combined-ca-bundle\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.942886 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmqbs\" (UniqueName: \"kubernetes.io/projected/a7920087-af57-4092-8d74-0bcb75fc9e9d-kube-api-access-nmqbs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:50 crc kubenswrapper[4689]: I0123 11:12:50.942917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.003431 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.010720 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.029577 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.045388 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data-custom\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.045525 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7920087-af57-4092-8d74-0bcb75fc9e9d-logs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.045628 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2m8d\" (UniqueName: \"kubernetes.io/projected/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-kube-api-access-c2m8d\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.045977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7920087-af57-4092-8d74-0bcb75fc9e9d-logs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046021 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046047 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-logs\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046125 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-combined-ca-bundle\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046167 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmqbs\" (UniqueName: \"kubernetes.io/projected/a7920087-af57-4092-8d74-0bcb75fc9e9d-kube-api-access-nmqbs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046193 
4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data-custom\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046216 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.046234 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-combined-ca-bundle\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.070844 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data-custom\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.080861 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-config-data\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.081373 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7920087-af57-4092-8d74-0bcb75fc9e9d-combined-ca-bundle\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.083591 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmqbs\" (UniqueName: \"kubernetes.io/projected/a7920087-af57-4092-8d74-0bcb75fc9e9d-kube-api-access-nmqbs\") pod \"barbican-worker-78cb64f85f-sf2tl\" (UID: \"a7920087-af57-4092-8d74-0bcb75fc9e9d\") " pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.137728 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-78cb64f85f-sf2tl" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.152467 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.152730 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.152847 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.152937 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153031 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2m8d\" (UniqueName: \"kubernetes.io/projected/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-kube-api-access-c2m8d\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153122 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mtm8\" (UniqueName: \"kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153244 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153329 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-logs\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153423 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153520 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data-custom\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.153598 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-combined-ca-bundle\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.154696 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-logs\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.159204 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-combined-ca-bundle\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.173486 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.185814 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-config-data-custom\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.189212 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.191011 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.200506 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.206762 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2m8d\" (UniqueName: \"kubernetes.io/projected/fbdf1ea2-a667-4b97-b775-c76f3bb7f235-kube-api-access-c2m8d\") pod \"barbican-keystone-listener-8756468f4-gcf24\" (UID: \"fbdf1ea2-a667-4b97-b775-c76f3bb7f235\") " pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.234530 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.234928 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255583 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255685 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255751 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255795 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255821 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctn5q\" (UniqueName: \"kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255871 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255898 4689 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255936 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.255964 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mtm8\" (UniqueName: \"kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.256000 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.256043 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.257831 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.257903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.258416 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.258498 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.258950 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.296400 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mtm8\" (UniqueName: \"kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8\") pod \"dnsmasq-dns-688c87cc99-v4jcx\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.358597 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.358901 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.359057 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.359542 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.361049 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctn5q\" (UniqueName: \"kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.362069 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.364342 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.364481 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data\") pod 
\"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.368325 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.379133 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctn5q\" (UniqueName: \"kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q\") pod \"barbican-api-765b5fc8f6-vfzgp\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.430938 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:51 crc kubenswrapper[4689]: I0123 11:12:51.571567 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:52 crc kubenswrapper[4689]: E0123 11:12:52.473801 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.668799 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810056 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810273 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810350 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810388 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810450 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbg45\" (UniqueName: \"kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: 
\"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.810579 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id\") pod \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\" (UID: \"b74dafce-64c6-4c46-886b-bdc4044b9b1e\") " Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.811439 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.818459 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.823665 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts" (OuterVolumeSpecName: "scripts") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.823739 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45" (OuterVolumeSpecName: "kube-api-access-lbg45") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "kube-api-access-lbg45". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.864634 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.911614 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lr5lm" event={"ID":"b74dafce-64c6-4c46-886b-bdc4044b9b1e","Type":"ContainerDied","Data":"2cd40e67188d5ff4fbdef8ddeb34c96a45f8b2c7ddda2c4418dbe14167aab339"} Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.911649 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2cd40e67188d5ff4fbdef8ddeb34c96a45f8b2c7ddda2c4418dbe14167aab339" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.911700 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lr5lm" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.912944 4689 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b74dafce-64c6-4c46-886b-bdc4044b9b1e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.912969 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.912980 4689 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.912989 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.912999 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbg45\" (UniqueName: \"kubernetes.io/projected/b74dafce-64c6-4c46-886b-bdc4044b9b1e-kube-api-access-lbg45\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.919689 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerStarted","Data":"95a3eff583d14a5c414172bd4f763cad5390b98693f4fc6394c651fc70c99098"} Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.919875 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="ceilometer-notification-agent" containerID="cri-o://2b27ce299afe74cbb7fc99210c5d42e451dff1e01d81ea3cfcab106c00fb4038" gracePeriod=30 Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.919912 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="proxy-httpd" containerID="cri-o://95a3eff583d14a5c414172bd4f763cad5390b98693f4fc6394c651fc70c99098" gracePeriod=30 Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.920008 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="sg-core" containerID="cri-o://23d224f521e4a9c3ca21f5152e73e05bc3475409625890acc9fb20d80195562d" gracePeriod=30 Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.919919 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:12:52 crc kubenswrapper[4689]: I0123 11:12:52.924301 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:12:52 crc kubenswrapper[4689]: W0123 11:12:52.942335 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7920087_af57_4092_8d74_0bcb75fc9e9d.slice/crio-2145f4c4b4ac0c5c207469fb706042678d4d8894229d16c6d1c8b0e1e2f42dbf WatchSource:0}: Error finding container 2145f4c4b4ac0c5c207469fb706042678d4d8894229d16c6d1c8b0e1e2f42dbf: Status 404 returned error can't find the container with id 
2145f4c4b4ac0c5c207469fb706042678d4d8894229d16c6d1c8b0e1e2f42dbf Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:52.950892 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:52.964546 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:52.976632 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-78cb64f85f-sf2tl"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.007711 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data" (OuterVolumeSpecName: "config-data") pod "b74dafce-64c6-4c46-886b-bdc4044b9b1e" (UID: "b74dafce-64c6-4c46-886b-bdc4044b9b1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:12:53 crc kubenswrapper[4689]: W0123 11:12:53.008858 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfbdf1ea2_a667_4b97_b775_c76f3bb7f235.slice/crio-1dca88df835b49e9e235f35873ce5860679499492d133e2586c7260ff5491e30 WatchSource:0}: Error finding container 1dca88df835b49e9e235f35873ce5860679499492d133e2586c7260ff5491e30: Status 404 returned error can't find the container with id 1dca88df835b49e9e235f35873ce5860679499492d133e2586c7260ff5491e30 Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.016950 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74dafce-64c6-4c46-886b-bdc4044b9b1e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.027183 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-8756468f4-gcf24"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.166672 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:12:53 crc kubenswrapper[4689]: E0123 11:12:53.167092 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" containerName="cinder-db-sync" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.167110 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" containerName="cinder-db-sync" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.167360 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" containerName="cinder-db-sync" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.168504 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.173189 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.223631 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.298615 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.334072 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.335930 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351389 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351429 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351570 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351698 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351852 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jwpd\" (UniqueName: \"kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.351952 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.357727 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.376103 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:12:53 crc 
kubenswrapper[4689]: I0123 11:12:53.377839 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.385968 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.392685 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.454796 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455047 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455085 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455126 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455174 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455206 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455235 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455265 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7m5v\" (UniqueName: \"kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " 
pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455307 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jwpd\" (UniqueName: \"kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455334 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455374 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455410 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.455756 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.463030 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.463109 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.468666 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.471226 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.473503 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jwpd\" 
(UniqueName: \"kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd\") pod \"cinder-scheduler-0\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.561707 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.562055 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.587651 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.589014 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.564704 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.591915 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqkjn\" (UniqueName: \"kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.594751 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.594840 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.594904 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs\") pod \"cinder-api-0\" (UID: 
\"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.595009 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.595038 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.595093 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7m5v\" (UniqueName: \"kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.595143 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.595309 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.596207 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.596816 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.597652 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.620450 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.622999 
4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7m5v\" (UniqueName: \"kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v\") pod \"dnsmasq-dns-6bb4fc677f-rt2sk\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698660 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqkjn\" (UniqueName: \"kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698731 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698791 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698831 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698879 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.698988 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.699032 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.699179 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.706565 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.707061 4689 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.709528 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.715006 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.726801 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.739121 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.740646 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.744807 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqkjn\" (UniqueName: \"kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn\") pod \"cinder-api-0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.748770 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.962435 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" event={"ID":"fbdf1ea2-a667-4b97-b775-c76f3bb7f235","Type":"ContainerStarted","Data":"1dca88df835b49e9e235f35873ce5860679499492d133e2586c7260ff5491e30"} Jan 23 11:12:53 crc kubenswrapper[4689]: I0123 11:12:53.980577 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-78cb64f85f-sf2tl" event={"ID":"a7920087-af57-4092-8d74-0bcb75fc9e9d","Type":"ContainerStarted","Data":"2145f4c4b4ac0c5c207469fb706042678d4d8894229d16c6d1c8b0e1e2f42dbf"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.006441 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerStarted","Data":"fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.006494 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerStarted","Data":"a668e06f23f901f765dc183e0f0ee223a075ceb6dbbfff6985b980c544f2f7c5"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.026528 4689 generic.go:334] "Generic (PLEG): container finished" podID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerID="8f753b39ca88fe7113e07b4b87bed62e256ed39551e9b961889598c74c97e508" exitCode=0 Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.026592 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerDied","Data":"8f753b39ca88fe7113e07b4b87bed62e256ed39551e9b961889598c74c97e508"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.026618 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerStarted","Data":"b9e63a772585637227422d874d26993378f985f1b754c7d8578987ab316c7389"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.065921 4689 generic.go:334] "Generic (PLEG): container finished" podID="cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" containerID="3ad3fd60ef40f9cf2ea7c6b9fd750c5c468e7d7ec05c886a3789a1584ed82f71" exitCode=0 Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.066163 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" event={"ID":"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c","Type":"ContainerDied","Data":"3ad3fd60ef40f9cf2ea7c6b9fd750c5c468e7d7ec05c886a3789a1584ed82f71"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.066190 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" event={"ID":"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c","Type":"ContainerStarted","Data":"ea0304a732643a027b661ed7de0da3a8443baf6f4f462e1e9be7ba6bdc5201fa"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.081723 4689 generic.go:334] "Generic (PLEG): container finished" podID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerID="95a3eff583d14a5c414172bd4f763cad5390b98693f4fc6394c651fc70c99098" exitCode=0 Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.081757 4689 generic.go:334] "Generic (PLEG): container finished" podID="82880b13-2692-46b7-a0b0-fb06b87426dd" 
containerID="23d224f521e4a9c3ca21f5152e73e05bc3475409625890acc9fb20d80195562d" exitCode=2 Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.081777 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerDied","Data":"95a3eff583d14a5c414172bd4f763cad5390b98693f4fc6394c651fc70c99098"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.081802 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerDied","Data":"23d224f521e4a9c3ca21f5152e73e05bc3475409625890acc9fb20d80195562d"} Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.272106 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.272237 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.401042 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.405752 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.795468 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.877623 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.918031 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:12:54 crc kubenswrapper[4689]: W0123 11:12:54.919625 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43b8b0e2_f632_41a4_846f_d1111c26633a.slice/crio-08c3722ed70bd07990327fa8e1262d1317064eda3847b6d1579965f001284eb6 WatchSource:0}: Error finding container 08c3722ed70bd07990327fa8e1262d1317064eda3847b6d1579965f001284eb6: Status 404 returned error can't find the container with id 08c3722ed70bd07990327fa8e1262d1317064eda3847b6d1579965f001284eb6 Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940066 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940181 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940242 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mtm8\" (UniqueName: \"kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940261 4689 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940292 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.940344 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0\") pod \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\" (UID: \"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c\") " Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.948382 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8" (OuterVolumeSpecName: "kube-api-access-4mtm8") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). InnerVolumeSpecName "kube-api-access-4mtm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.970773 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.971564 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config" (OuterVolumeSpecName: "config") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.975593 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.986063 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:54 crc kubenswrapper[4689]: I0123 11:12:54.995099 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" (UID: "cbc440a7-07ce-4d42-a5c9-45028fe0bb5c"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043329 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mtm8\" (UniqueName: \"kubernetes.io/projected/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-kube-api-access-4mtm8\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043366 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043378 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043387 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043397 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.043406 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.100506 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerStarted","Data":"793d51d94e45fc7eba9da7e070c38542458a4c63ce35ca7785baf76991428aaf"} Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.103322 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerStarted","Data":"bd0a6af6f2e151ed17842795c8ec421f2fae0323ac1c355fed9f395a8bcadeec"} Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.103468 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.109653 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" event={"ID":"cbc440a7-07ce-4d42-a5c9-45028fe0bb5c","Type":"ContainerDied","Data":"ea0304a732643a027b661ed7de0da3a8443baf6f4f462e1e9be7ba6bdc5201fa"} Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.109687 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-v4jcx" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.109717 4689 scope.go:117] "RemoveContainer" containerID="3ad3fd60ef40f9cf2ea7c6b9fd750c5c468e7d7ec05c886a3789a1584ed82f71" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.111462 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" event={"ID":"43b8b0e2-f632-41a4-846f-d1111c26633a","Type":"ContainerStarted","Data":"08c3722ed70bd07990327fa8e1262d1317064eda3847b6d1579965f001284eb6"} Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.127420 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerStarted","Data":"c726d0d8ebc75fc5b79b0e1019afc42a2db03855efe2460581f2a8c5e1f09e68"} Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.135796 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-765b5fc8f6-vfzgp" podStartSLOduration=4.135775512 podStartE2EDuration="4.135775512s" podCreationTimestamp="2026-01-23 11:12:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:12:55.122620014 +0000 UTC m=+1439.747299873" watchObservedRunningTime="2026-01-23 11:12:55.135775512 +0000 UTC m=+1439.760455371" Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.215391 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.230615 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-v4jcx"] Jan 23 11:12:55 crc kubenswrapper[4689]: I0123 11:12:55.656267 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" path="/var/lib/kubelet/pods/cbc440a7-07ce-4d42-a5c9-45028fe0bb5c/volumes" Jan 23 11:12:56 crc kubenswrapper[4689]: I0123 11:12:56.149316 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:12:57 crc kubenswrapper[4689]: I0123 11:12:57.612190 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.168791 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-86d7448556-jq64j"] Jan 23 11:12:58 crc kubenswrapper[4689]: E0123 11:12:58.170068 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" containerName="init" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.170093 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" containerName="init" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.173861 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbc440a7-07ce-4d42-a5c9-45028fe0bb5c" containerName="init" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.178718 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.211072 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.211104 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.216715 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86d7448556-jq64j"] Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269015 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-internal-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269120 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data-custom\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269240 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-combined-ca-bundle\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269388 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wncx7\" (UniqueName: \"kubernetes.io/projected/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-kube-api-access-wncx7\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269540 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-logs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269679 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-public-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.269742 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371685 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data-custom\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371768 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-combined-ca-bundle\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371821 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wncx7\" (UniqueName: \"kubernetes.io/projected/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-kube-api-access-wncx7\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371868 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-logs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371922 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-public-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.371947 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.372090 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-internal-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.372624 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-logs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.378232 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-internal-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.379140 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.382120 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-config-data-custom\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.383637 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-public-tls-certs\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.385087 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-combined-ca-bundle\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.400843 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wncx7\" (UniqueName: \"kubernetes.io/projected/e7fe0b60-2131-41ce-a23d-1ba4eb389afd-kube-api-access-wncx7\") pod \"barbican-api-86d7448556-jq64j\" (UID: \"e7fe0b60-2131-41ce-a23d-1ba4eb389afd\") " pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.533053 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.759486 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.973975 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.974240 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-87585974f-vcczk" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-api" containerID="cri-o://f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928" gracePeriod=30 Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.974505 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-87585974f-vcczk" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-httpd" containerID="cri-o://4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c" gracePeriod=30 Jan 23 11:12:58 crc kubenswrapper[4689]: I0123 11:12:58.992299 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-87585974f-vcczk" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.193:9696/\": EOF" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.014140 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6cf4c786cc-4bmzv"] Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.016523 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.072602 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cf4c786cc-4bmzv"] Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.178092 4689 generic.go:334] "Generic (PLEG): container finished" podID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerID="afc2a4b2e140d8738e750a36eb8d1bb3222248c9355e1d566b72b8a324ce3a9d" exitCode=0 Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.178157 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" event={"ID":"43b8b0e2-f632-41a4-846f-d1111c26633a","Type":"ContainerDied","Data":"afc2a4b2e140d8738e750a36eb8d1bb3222248c9355e1d566b72b8a324ce3a9d"} Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.182629 4689 generic.go:334] "Generic (PLEG): container finished" podID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerID="2b27ce299afe74cbb7fc99210c5d42e451dff1e01d81ea3cfcab106c00fb4038" exitCode=0 Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.182697 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerDied","Data":"2b27ce299afe74cbb7fc99210c5d42e451dff1e01d81ea3cfcab106c00fb4038"} Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.184401 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerStarted","Data":"79ffea6f359015be6b20d789b26088cd9590978a1bcf790408b33ff8bb8e8d81"} Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196590 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-combined-ca-bundle\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196636 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-public-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196657 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn6n2\" (UniqueName: \"kubernetes.io/projected/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-kube-api-access-kn6n2\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196814 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-ovndb-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196855 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-internal-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196934 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-httpd-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.196995 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.298913 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-httpd-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.298971 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.299137 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-combined-ca-bundle\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.299463 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-public-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.299486 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn6n2\" (UniqueName: \"kubernetes.io/projected/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-kube-api-access-kn6n2\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.300210 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-ovndb-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.300277 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-internal-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.304712 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-combined-ca-bundle\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.304794 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-ovndb-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.305372 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-internal-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.305422 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-httpd-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.305434 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-public-tls-certs\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: 
\"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.308366 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-config\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.327278 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn6n2\" (UniqueName: \"kubernetes.io/projected/3b0b0aa7-a504-49b8-b6b4-5548b6ee7690-kube-api-access-kn6n2\") pod \"neutron-6cf4c786cc-4bmzv\" (UID: \"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690\") " pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.337676 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:12:59 crc kubenswrapper[4689]: I0123 11:12:59.819801 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.014853 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k86fs\" (UniqueName: \"kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015126 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015342 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015434 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015633 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015737 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd\") pod \"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.015870 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd\") pod 
\"82880b13-2692-46b7-a0b0-fb06b87426dd\" (UID: \"82880b13-2692-46b7-a0b0-fb06b87426dd\") " Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.016069 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.016350 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.016751 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.016849 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82880b13-2692-46b7-a0b0-fb06b87426dd-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.027374 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts" (OuterVolumeSpecName: "scripts") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.027452 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs" (OuterVolumeSpecName: "kube-api-access-k86fs") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "kube-api-access-k86fs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.047573 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.097481 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.119340 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k86fs\" (UniqueName: \"kubernetes.io/projected/82880b13-2692-46b7-a0b0-fb06b87426dd-kube-api-access-k86fs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.119387 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.119402 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.119413 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.137839 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data" (OuterVolumeSpecName: "config-data") pod "82880b13-2692-46b7-a0b0-fb06b87426dd" (UID: "82880b13-2692-46b7-a0b0-fb06b87426dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.205628 4689 generic.go:334] "Generic (PLEG): container finished" podID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerID="4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c" exitCode=0 Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.205699 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerDied","Data":"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c"} Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.209606 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82880b13-2692-46b7-a0b0-fb06b87426dd","Type":"ContainerDied","Data":"205b59abdc76e7a045d76c80c3c580754699be698371b968e4a968e33181c7a6"} Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.209646 4689 scope.go:117] "RemoveContainer" containerID="95a3eff583d14a5c414172bd4f763cad5390b98693f4fc6394c651fc70c99098" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.209654 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.224107 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82880b13-2692-46b7-a0b0-fb06b87426dd-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.307861 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.343494 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.368676 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:00 crc kubenswrapper[4689]: E0123 11:13:00.369194 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="ceilometer-notification-agent" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369206 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="ceilometer-notification-agent" Jan 23 11:13:00 crc kubenswrapper[4689]: E0123 11:13:00.369220 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="sg-core" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369226 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="sg-core" Jan 23 11:13:00 crc kubenswrapper[4689]: E0123 11:13:00.369241 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="proxy-httpd" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369247 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="proxy-httpd" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369454 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="sg-core" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369473 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="proxy-httpd" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.369491 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" containerName="ceilometer-notification-agent" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.370341 4689 scope.go:117] "RemoveContainer" containerID="23d224f521e4a9c3ca21f5152e73e05bc3475409625890acc9fb20d80195562d" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.371864 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.375529 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.375754 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.400433 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.478251 4689 scope.go:117] "RemoveContainer" containerID="2b27ce299afe74cbb7fc99210c5d42e451dff1e01d81ea3cfcab106c00fb4038" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531248 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531328 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531359 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56cjj\" (UniqueName: \"kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531485 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531534 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531620 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.531747 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633236 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633344 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56cjj\" (UniqueName: \"kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633410 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633437 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633487 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633537 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.633933 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.640522 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.640534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.659829 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.661256 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.697889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.708099 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56cjj\" (UniqueName: \"kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj\") pod \"ceilometer-0\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.725051 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:00 crc kubenswrapper[4689]: I0123 11:13:00.865946 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86d7448556-jq64j"] Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.193097 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cf4c786cc-4bmzv"] Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.249524 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86d7448556-jq64j" event={"ID":"e7fe0b60-2131-41ce-a23d-1ba4eb389afd","Type":"ContainerStarted","Data":"619fa2b8b86f7203f330333d0a977c94733f93dd064f520dc9f988c23ccc8fe7"} Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.260026 4689 generic.go:334] "Generic (PLEG): container finished" podID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerID="1dc3e6fb8619da30ce4d89119e5aa084639aded52ce528bd51582a0eb45a0995" exitCode=0 Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.260178 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerDied","Data":"1dc3e6fb8619da30ce4d89119e5aa084639aded52ce528bd51582a0eb45a0995"} Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.269765 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" event={"ID":"43b8b0e2-f632-41a4-846f-d1111c26633a","Type":"ContainerStarted","Data":"f470a83fadb9b2103418b52dff07e0f8881f21600363d8299b24a5cc45121b1f"} Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.269917 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.295880 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" event={"ID":"fbdf1ea2-a667-4b97-b775-c76f3bb7f235","Type":"ContainerStarted","Data":"99abf2ec7361e0f4a459c65477c58b7740298bbca6ebd4607e63093c4f02f292"} Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.331239 4689 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" podStartSLOduration=8.331135413 podStartE2EDuration="8.331135413s" podCreationTimestamp="2026-01-23 11:12:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:01.303809894 +0000 UTC m=+1445.928489753" watchObservedRunningTime="2026-01-23 11:13:01.331135413 +0000 UTC m=+1445.955815272" Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.576038 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.684257 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82880b13-2692-46b7-a0b0-fb06b87426dd" path="/var/lib/kubelet/pods/82880b13-2692-46b7-a0b0-fb06b87426dd/volumes" Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.750182 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-68cfbc8d8-tc5p4" Jan 23 11:13:01 crc kubenswrapper[4689]: I0123 11:13:01.996290 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-87585974f-vcczk" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.003850 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.003943 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.003980 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.004012 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.004046 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.004083 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbw2x\" (UniqueName: \"kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.004226 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle\") pod \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\" (UID: \"b9558d60-662c-4ab0-8a2a-f0ad202a80b0\") " Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.011724 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.013410 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x" (OuterVolumeSpecName: "kube-api-access-wbw2x") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "kube-api-access-wbw2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.107068 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbw2x\" (UniqueName: \"kubernetes.io/projected/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-kube-api-access-wbw2x\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.107112 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.204986 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config" (OuterVolumeSpecName: "config") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.208285 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.210735 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.225200 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.230712 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). 
InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.252370 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b9558d60-662c-4ab0-8a2a-f0ad202a80b0" (UID: "b9558d60-662c-4ab0-8a2a-f0ad202a80b0"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.309902 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.310135 4689 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.310235 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.310308 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9558d60-662c-4ab0-8a2a-f0ad202a80b0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.311595 4689 generic.go:334] "Generic (PLEG): container finished" podID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerID="f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928" exitCode=0 Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.311632 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerDied","Data":"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.311673 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-87585974f-vcczk" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.311716 4689 scope.go:117] "RemoveContainer" containerID="4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.311680 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-87585974f-vcczk" event={"ID":"b9558d60-662c-4ab0-8a2a-f0ad202a80b0","Type":"ContainerDied","Data":"3b009eaafc2d98626f7f6ff498e47e0f4018b08c813137b96f17e14bcefa6ffc"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.313381 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerStarted","Data":"ace5f5b3e9584358e0745b05b2206e68852cdf7fa48a37b0c5434b884092cf1b"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.324557 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" event={"ID":"fbdf1ea2-a667-4b97-b775-c76f3bb7f235","Type":"ContainerStarted","Data":"9ea77f1b8ca01c72603f3b776317990a2e1800baeecda7f6fa72ca5252e03ee7"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.330023 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-78cb64f85f-sf2tl" event={"ID":"a7920087-af57-4092-8d74-0bcb75fc9e9d","Type":"ContainerStarted","Data":"9cf9050e04b35ce1d96a490da25f00d9b6d97ea90886653c85f7f47a84ed233a"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.337321 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86d7448556-jq64j" event={"ID":"e7fe0b60-2131-41ce-a23d-1ba4eb389afd","Type":"ContainerStarted","Data":"aa317636aaaae025977392d3c5b201f7a296f00005d58bfe468c366ef8a86157"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.355359 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cf4c786cc-4bmzv" event={"ID":"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690","Type":"ContainerStarted","Data":"af0c5d5fee3e27f12f3f6ff7f2d1d234646ef6904d2dc7db5ca33ff4e9c2be6a"} Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.355434 4689 scope.go:117] "RemoveContainer" containerID="f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.362334 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.377520 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-87585974f-vcczk"] Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.406446 4689 scope.go:117] "RemoveContainer" containerID="4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c" Jan 23 11:13:02 crc kubenswrapper[4689]: E0123 11:13:02.407331 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c\": container with ID starting with 4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c not found: ID does not exist" containerID="4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.407395 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c"} err="failed to get container status 
\"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c\": rpc error: code = NotFound desc = could not find container \"4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c\": container with ID starting with 4d2c902bf9901318bb69d969ae43ac2434da57824c572774da8a6b74b9245b1c not found: ID does not exist" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.407437 4689 scope.go:117] "RemoveContainer" containerID="f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928" Jan 23 11:13:02 crc kubenswrapper[4689]: E0123 11:13:02.407861 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928\": container with ID starting with f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928 not found: ID does not exist" containerID="f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928" Jan 23 11:13:02 crc kubenswrapper[4689]: I0123 11:13:02.407888 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928"} err="failed to get container status \"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928\": rpc error: code = NotFound desc = could not find container \"f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928\": container with ID starting with f12db91e99c025120890255b9767c44a627ead2fe439d83486e068b57f4ec928 not found: ID does not exist" Jan 23 11:13:03 crc kubenswrapper[4689]: I0123 11:13:03.310516 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:13:03 crc kubenswrapper[4689]: I0123 11:13:03.310840 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:13:03 crc kubenswrapper[4689]: I0123 11:13:03.666364 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" path="/var/lib/kubelet/pods/b9558d60-662c-4ab0-8a2a-f0ad202a80b0/volumes" Jan 23 11:13:04 crc kubenswrapper[4689]: I0123 11:13:04.536619 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:13:04 crc kubenswrapper[4689]: I0123 11:13:04.544263 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.403290 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-78cb64f85f-sf2tl" event={"ID":"a7920087-af57-4092-8d74-0bcb75fc9e9d","Type":"ContainerStarted","Data":"246681ccf587935653f7d8d1564648047b37da5316d1c9a2edecf69fca091b99"} Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.425520 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerStarted","Data":"7709aaf5e05b9e11f2be156ecffb936e2cc185524162a8d1d36ca3d487290d9b"} Jan 23 
11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.425727 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api-log" containerID="cri-o://79ffea6f359015be6b20d789b26088cd9590978a1bcf790408b33ff8bb8e8d81" gracePeriod=30 Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.426075 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.426108 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" containerID="cri-o://7709aaf5e05b9e11f2be156ecffb936e2cc185524162a8d1d36ca3d487290d9b" gracePeriod=30 Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.441315 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-78cb64f85f-sf2tl" podStartSLOduration=7.944976254 podStartE2EDuration="15.441290626s" podCreationTimestamp="2026-01-23 11:12:50 +0000 UTC" firstStartedPulling="2026-01-23 11:12:52.956963549 +0000 UTC m=+1437.581643408" lastFinishedPulling="2026-01-23 11:13:00.453277921 +0000 UTC m=+1445.077957780" observedRunningTime="2026-01-23 11:13:05.425692861 +0000 UTC m=+1450.050372720" watchObservedRunningTime="2026-01-23 11:13:05.441290626 +0000 UTC m=+1450.065970485" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.443770 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86d7448556-jq64j" event={"ID":"e7fe0b60-2131-41ce-a23d-1ba4eb389afd","Type":"ContainerStarted","Data":"e66e52680b3507b49c26c07e96bffcab017ef1974e789dd30bef5ff9f9e8cf08"} Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.444952 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.444981 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.455887 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cf4c786cc-4bmzv" event={"ID":"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690","Type":"ContainerStarted","Data":"f663574a5b3603f1547d49f37d9a414f2232a12a27a45324e6d8bbfea8e46047"} Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.460182 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=12.460163094 podStartE2EDuration="12.460163094s" podCreationTimestamp="2026-01-23 11:12:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:05.450706819 +0000 UTC m=+1450.075386688" watchObservedRunningTime="2026-01-23 11:13:05.460163094 +0000 UTC m=+1450.084842943" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.481781 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-86d7448556-jq64j" podStartSLOduration=7.481762534 podStartE2EDuration="7.481762534s" podCreationTimestamp="2026-01-23 11:12:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:05.472571815 +0000 UTC m=+1450.097251674" watchObservedRunningTime="2026-01-23 11:13:05.481762534 
+0000 UTC m=+1450.106442393" Jan 23 11:13:05 crc kubenswrapper[4689]: I0123 11:13:05.526717 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-8756468f4-gcf24" podStartSLOduration=8.313357121 podStartE2EDuration="15.526696093s" podCreationTimestamp="2026-01-23 11:12:50 +0000 UTC" firstStartedPulling="2026-01-23 11:12:53.013791539 +0000 UTC m=+1437.638471398" lastFinishedPulling="2026-01-23 11:13:00.227130511 +0000 UTC m=+1444.851810370" observedRunningTime="2026-01-23 11:13:05.523598342 +0000 UTC m=+1450.148278211" watchObservedRunningTime="2026-01-23 11:13:05.526696093 +0000 UTC m=+1450.151375952" Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.493757 4689 generic.go:334] "Generic (PLEG): container finished" podID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerID="79ffea6f359015be6b20d789b26088cd9590978a1bcf790408b33ff8bb8e8d81" exitCode=143 Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.493842 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerDied","Data":"79ffea6f359015be6b20d789b26088cd9590978a1bcf790408b33ff8bb8e8d81"} Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.496771 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cf4c786cc-4bmzv" event={"ID":"3b0b0aa7-a504-49b8-b6b4-5548b6ee7690","Type":"ContainerStarted","Data":"8e2922efaa3aa9dff81532dfb84703d8eac644c0baa786b0b7cbc74f4352cd51"} Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.496917 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.499171 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerStarted","Data":"ae4c00199f58ad5f92086820dd8ba32ad5b007746d6bbed0ad823b658a98a338"} Jan 23 11:13:06 crc kubenswrapper[4689]: I0123 11:13:06.520743 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6cf4c786cc-4bmzv" podStartSLOduration=8.520728751 podStartE2EDuration="8.520728751s" podCreationTimestamp="2026-01-23 11:12:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:06.517656401 +0000 UTC m=+1451.142336250" watchObservedRunningTime="2026-01-23 11:13:06.520728751 +0000 UTC m=+1451.145408610" Jan 23 11:13:07 crc kubenswrapper[4689]: I0123 11:13:07.518944 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerStarted","Data":"8429437d5f543289a6e642806fc4054b4d9d9cf9c302f1a35f64dcb28a6f7d5e"} Jan 23 11:13:07 crc kubenswrapper[4689]: I0123 11:13:07.544255 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.172326672 podStartE2EDuration="14.544237208s" podCreationTimestamp="2026-01-23 11:12:53 +0000 UTC" firstStartedPulling="2026-01-23 11:12:54.46735341 +0000 UTC m=+1439.092033269" lastFinishedPulling="2026-01-23 11:13:00.839263946 +0000 UTC m=+1445.463943805" observedRunningTime="2026-01-23 11:13:07.536376999 +0000 UTC m=+1452.161056858" watchObservedRunningTime="2026-01-23 11:13:07.544237208 +0000 UTC m=+1452.168917067" Jan 23 11:13:08 crc kubenswrapper[4689]: I0123 
11:13:08.708289 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 23 11:13:08 crc kubenswrapper[4689]: I0123 11:13:08.742286 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:13:08 crc kubenswrapper[4689]: I0123 11:13:08.811463 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:13:08 crc kubenswrapper[4689]: I0123 11:13:08.811738 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="dnsmasq-dns" containerID="cri-o://c7c30db92bdc875cce4aefa0694b1272c32306535a4d8a68b4bc32de3bcb92c5" gracePeriod=10 Jan 23 11:13:09 crc kubenswrapper[4689]: I0123 11:13:09.060285 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:13:10 crc kubenswrapper[4689]: I0123 11:13:10.581579 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerStarted","Data":"39167b78e5dc85a7dc6e90d361d54de67d464904e50658e2753074ed85c4251a"} Jan 23 11:13:10 crc kubenswrapper[4689]: I0123 11:13:10.586414 4689 generic.go:334] "Generic (PLEG): container finished" podID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerID="c7c30db92bdc875cce4aefa0694b1272c32306535a4d8a68b4bc32de3bcb92c5" exitCode=0 Jan 23 11:13:10 crc kubenswrapper[4689]: I0123 11:13:10.586544 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" event={"ID":"980459f0-f118-4e49-9ee4-a05ec5876c2c","Type":"ContainerDied","Data":"c7c30db92bdc875cce4aefa0694b1272c32306535a4d8a68b4bc32de3bcb92c5"} Jan 23 11:13:10 crc kubenswrapper[4689]: I0123 11:13:10.588770 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerStarted","Data":"0d8088a4de1cdda262a8e859efdc1e86d7af2369f57fc39ec6473db28b41ab97"} Jan 23 11:13:10 crc kubenswrapper[4689]: I0123 11:13:10.610665 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8wtbq" podStartSLOduration=12.453630289 podStartE2EDuration="25.610637514s" podCreationTimestamp="2026-01-23 11:12:45 +0000 UTC" firstStartedPulling="2026-01-23 11:12:54.028593797 +0000 UTC m=+1438.653273646" lastFinishedPulling="2026-01-23 11:13:07.185601012 +0000 UTC m=+1451.810280871" observedRunningTime="2026-01-23 11:13:10.606293656 +0000 UTC m=+1455.230973525" watchObservedRunningTime="2026-01-23 11:13:10.610637514 +0000 UTC m=+1455.235317373" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.600501 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerStarted","Data":"3e9fc1db92380683fa51bc6c49ac4e2a0291cc36dc99dd591628a5bc795df84c"} Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.602685 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" event={"ID":"980459f0-f118-4e49-9ee4-a05ec5876c2c","Type":"ContainerDied","Data":"a81009394aeda1c5845e2a7b680baceaf3d373b141aaaed0415c2fc3f1922ab4"} Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.602735 4689 pod_container_deletor.go:80] 
"Container not found in pod's containers" containerID="a81009394aeda1c5845e2a7b680baceaf3d373b141aaaed0415c2fc3f1922ab4" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.678547 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.751537 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.752229 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx8hs\" (UniqueName: \"kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.752253 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.752286 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.752305 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.752362 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb\") pod \"980459f0-f118-4e49-9ee4-a05ec5876c2c\" (UID: \"980459f0-f118-4e49-9ee4-a05ec5876c2c\") " Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.774569 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs" (OuterVolumeSpecName: "kube-api-access-vx8hs") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "kube-api-access-vx8hs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.843123 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.856446 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx8hs\" (UniqueName: \"kubernetes.io/projected/980459f0-f118-4e49-9ee4-a05ec5876c2c-kube-api-access-vx8hs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.856477 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.861437 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config" (OuterVolumeSpecName: "config") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.867176 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.868496 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.886945 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "980459f0-f118-4e49-9ee4-a05ec5876c2c" (UID: "980459f0-f118-4e49-9ee4-a05ec5876c2c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.958613 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.958646 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.958655 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:11 crc kubenswrapper[4689]: I0123 11:13:11.958666 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/980459f0-f118-4e49-9ee4-a05ec5876c2c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:12 crc kubenswrapper[4689]: I0123 11:13:12.615860 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-lxq4w" Jan 23 11:13:12 crc kubenswrapper[4689]: I0123 11:13:12.627229 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerStarted","Data":"a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f"} Jan 23 11:13:12 crc kubenswrapper[4689]: I0123 11:13:12.661693 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:13:12 crc kubenswrapper[4689]: I0123 11:13:12.679878 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-lxq4w"] Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.464728 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-777b6f5fc9-72drb" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.653055 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" path="/var/lib/kubelet/pods/980459f0-f118-4e49-9ee4-a05ec5876c2c/volumes" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.965946 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:13 crc kubenswrapper[4689]: E0123 11:13:13.966557 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-api" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.966581 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-api" Jan 23 11:13:13 crc kubenswrapper[4689]: E0123 11:13:13.966597 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="init" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.966639 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="init" Jan 23 11:13:13 crc kubenswrapper[4689]: E0123 11:13:13.966661 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-httpd" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.966672 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-httpd" Jan 23 11:13:13 crc kubenswrapper[4689]: E0123 11:13:13.966725 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="dnsmasq-dns" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.966735 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="dnsmasq-dns" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.966990 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-httpd" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.967027 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9558d60-662c-4ab0-8a2a-f0ad202a80b0" containerName="neutron-api" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.967053 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="980459f0-f118-4e49-9ee4-a05ec5876c2c" containerName="dnsmasq-dns" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.968971 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:13 crc kubenswrapper[4689]: I0123 11:13:13.995110 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.118889 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.119073 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8ldr\" (UniqueName: \"kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.119202 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.221529 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8ldr\" (UniqueName: \"kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.221661 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.221743 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.222428 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.222600 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.254019 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v8ldr\" (UniqueName: \"kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr\") pod \"certified-operators-4tsxl\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:14 crc kubenswrapper[4689]: I0123 11:13:14.290707 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:15 crc kubenswrapper[4689]: I0123 11:13:15.457468 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86d7448556-jq64j" Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.221477 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.221795 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.245991 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.246107 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.245997 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 11:13:16 crc kubenswrapper[4689]: I0123 11:13:16.246247 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:13:17 crc kubenswrapper[4689]: I0123 11:13:17.197426 4689 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-s7k65 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 11:13:17 crc kubenswrapper[4689]: I0123 11:13:17.197529 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" podUID="386d7669-fab2-42b9-ac43-767d9ae837b8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:13:17 crc kubenswrapper[4689]: I0123 11:13:17.891487 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 3.595619721s: 
[/var/lib/containers/storage/overlay/b7697cb60f6e07be02a2cf26eae5d3d480e70b2f23b1d1aa78f5eb23a5f8c845/diff /var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-rt7xn_a9f05c03-72c2-4906-b327-df50d5922d28/manager/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.023622 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.023951 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-765b5fc8f6-vfzgp" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api-log" containerID="cri-o://fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2" gracePeriod=30 Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.024701 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-765b5fc8f6-vfzgp" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api" containerID="cri-o://bd0a6af6f2e151ed17842795c8ec421f2fae0323ac1c355fed9f395a8bcadeec" gracePeriod=30 Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.167405 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.168930 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.175275 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4jjqr" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.175502 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.175630 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.201235 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.337360 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.337739 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.337767 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.337862 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ptz9r\" (UniqueName: \"kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.372965 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.436393 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.442631 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.442672 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.442688 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.442741 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptz9r\" (UniqueName: \"kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.444053 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.454002 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.460793 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptz9r\" (UniqueName: \"kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.464687 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 
11:13:18.621288 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.644043 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.667074 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.668505 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.669999 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.698087 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.785652 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerStarted","Data":"117b0f0e510cb934a9c49a820cbf637412206b7118ff24e3256808c41cbd8a5a"} Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.789337 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.789454 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config-secret\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.789564 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rshzv\" (UniqueName: \"kubernetes.io/projected/1a99db63-bd38-464f-b9f9-31bc662fb39d-kube-api-access-rshzv\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.789608 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.790760 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.795807 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.204:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:13:18 crc kubenswrapper[4689]: E0123 11:13:18.808533 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5abc6a2_ca3a_4c7f_97ef_bc0510e1bfd0.slice/crio-conmon-fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:13:18 crc kubenswrapper[4689]: E0123 11:13:18.833404 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5abc6a2_ca3a_4c7f_97ef_bc0510e1bfd0.slice/crio-fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.854419 4689 generic.go:334] "Generic (PLEG): container finished" podID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerID="fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2" exitCode=143 Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.854697 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="cinder-scheduler" containerID="cri-o://ae4c00199f58ad5f92086820dd8ba32ad5b007746d6bbed0ad823b658a98a338" gracePeriod=30 Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.854957 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerDied","Data":"fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2"} Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.855030 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="probe" containerID="cri-o://8429437d5f543289a6e642806fc4054b4d9d9cf9c302f1a35f64dcb28a6f7d5e" gracePeriod=30 Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.881462 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=7.38609699 podStartE2EDuration="18.881443734s" podCreationTimestamp="2026-01-23 11:13:00 +0000 UTC" firstStartedPulling="2026-01-23 11:13:01.680341304 +0000 UTC m=+1446.305021163" lastFinishedPulling="2026-01-23 11:13:13.175688048 +0000 UTC m=+1457.800367907" observedRunningTime="2026-01-23 11:13:18.855446284 +0000 UTC m=+1463.480126143" watchObservedRunningTime="2026-01-23 11:13:18.881443734 +0000 UTC m=+1463.506123593" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.893777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.893927 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config-secret\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.893992 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rshzv\" (UniqueName: 
\"kubernetes.io/projected/1a99db63-bd38-464f-b9f9-31bc662fb39d-kube-api-access-rshzv\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.894025 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.896203 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.907527 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-openstack-config-secret\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.922531 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a99db63-bd38-464f-b9f9-31bc662fb39d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:18 crc kubenswrapper[4689]: I0123 11:13:18.957708 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rshzv\" (UniqueName: \"kubernetes.io/projected/1a99db63-bd38-464f-b9f9-31bc662fb39d-kube-api-access-rshzv\") pod \"openstackclient\" (UID: \"1a99db63-bd38-464f-b9f9-31bc662fb39d\") " pod="openstack/openstackclient" Jan 23 11:13:19 crc kubenswrapper[4689]: E0123 11:13:19.001388 4689 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 23 11:13:19 crc kubenswrapper[4689]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_faa5674c-d298-4765-b4a2-883adec9181b_0(514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7" Netns:"/var/run/netns/9c4f4d17-c225-4ce7-b2b4-c4ae1d2cd29c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7;K8S_POD_UID=faa5674c-d298-4765-b4a2-883adec9181b" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/faa5674c-d298-4765-b4a2-883adec9181b]: expected pod UID "faa5674c-d298-4765-b4a2-883adec9181b" but got "1a99db63-bd38-464f-b9f9-31bc662fb39d" from Kube API Jan 23 11:13:19 crc kubenswrapper[4689]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 11:13:19 crc kubenswrapper[4689]: > Jan 23 11:13:19 crc kubenswrapper[4689]: E0123 11:13:19.001479 4689 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 23 11:13:19 crc kubenswrapper[4689]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_faa5674c-d298-4765-b4a2-883adec9181b_0(514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7" Netns:"/var/run/netns/9c4f4d17-c225-4ce7-b2b4-c4ae1d2cd29c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=514ce53e9aaf9325e5c01a208fb8770967674b3b98942ca89f27489efb0de8e7;K8S_POD_UID=faa5674c-d298-4765-b4a2-883adec9181b" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/faa5674c-d298-4765-b4a2-883adec9181b]: expected pod UID "faa5674c-d298-4765-b4a2-883adec9181b" but got "1a99db63-bd38-464f-b9f9-31bc662fb39d" from Kube API Jan 23 11:13:19 crc kubenswrapper[4689]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 11:13:19 crc kubenswrapper[4689]: > pod="openstack/openstackclient" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.050479 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.127191 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.573310 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8wtbq" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server" probeResult="failure" output=< Jan 23 11:13:19 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:13:19 crc kubenswrapper[4689]: > Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.717113 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.864601 4689 generic.go:334] "Generic (PLEG): container finished" podID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerID="38efb61dd2bff4f8d6bb484e978c4d26dc53f0d9d0bfb6076d71667604bf5c08" exitCode=0 Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.864657 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerDied","Data":"38efb61dd2bff4f8d6bb484e978c4d26dc53f0d9d0bfb6076d71667604bf5c08"} Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.864712 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerStarted","Data":"6674314a2cbba6d08f3f89aab49e91e1431027ab2238d67c96f3a676809bf905"} Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.866529 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1a99db63-bd38-464f-b9f9-31bc662fb39d","Type":"ContainerStarted","Data":"ed5acd673de30ce360b88978546b34499e3636538b5d14c100b1f00d957d572f"} Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.866623 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.876239 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.891045 4689 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="faa5674c-d298-4765-b4a2-883adec9181b" podUID="1a99db63-bd38-464f-b9f9-31bc662fb39d" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.945310 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret\") pod \"faa5674c-d298-4765-b4a2-883adec9181b\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.945350 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle\") pod \"faa5674c-d298-4765-b4a2-883adec9181b\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.945497 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptz9r\" (UniqueName: \"kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r\") pod \"faa5674c-d298-4765-b4a2-883adec9181b\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.945555 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config\") pod \"faa5674c-d298-4765-b4a2-883adec9181b\" (UID: \"faa5674c-d298-4765-b4a2-883adec9181b\") " Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.946100 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "faa5674c-d298-4765-b4a2-883adec9181b" (UID: "faa5674c-d298-4765-b4a2-883adec9181b"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.946376 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.951208 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "faa5674c-d298-4765-b4a2-883adec9181b" (UID: "faa5674c-d298-4765-b4a2-883adec9181b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.952237 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "faa5674c-d298-4765-b4a2-883adec9181b" (UID: "faa5674c-d298-4765-b4a2-883adec9181b"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:19 crc kubenswrapper[4689]: I0123 11:13:19.952939 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r" (OuterVolumeSpecName: "kube-api-access-ptz9r") pod "faa5674c-d298-4765-b4a2-883adec9181b" (UID: "faa5674c-d298-4765-b4a2-883adec9181b"). InnerVolumeSpecName "kube-api-access-ptz9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.049100 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptz9r\" (UniqueName: \"kubernetes.io/projected/faa5674c-d298-4765-b4a2-883adec9181b-kube-api-access-ptz9r\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.049165 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.049183 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faa5674c-d298-4765-b4a2-883adec9181b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.879374 4689 generic.go:334] "Generic (PLEG): container finished" podID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerID="8429437d5f543289a6e642806fc4054b4d9d9cf9c302f1a35f64dcb28a6f7d5e" exitCode=0 Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.879443 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.879442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerDied","Data":"8429437d5f543289a6e642806fc4054b4d9d9cf9c302f1a35f64dcb28a6f7d5e"} Jan 23 11:13:20 crc kubenswrapper[4689]: I0123 11:13:20.894509 4689 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="faa5674c-d298-4765-b4a2-883adec9181b" podUID="1a99db63-bd38-464f-b9f9-31bc662fb39d" Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.432514 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-765b5fc8f6-vfzgp" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": dial tcp 10.217.0.201:9311: connect: connection refused" Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.432517 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-765b5fc8f6-vfzgp" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": dial tcp 10.217.0.201:9311: connect: connection refused" Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.679783 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="faa5674c-d298-4765-b4a2-883adec9181b" path="/var/lib/kubelet/pods/faa5674c-d298-4765-b4a2-883adec9181b/volumes" Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.927919 4689 generic.go:334] "Generic (PLEG): container finished" podID="222cb23c-4c24-4038-9ff9-bb39b2df776b" 
containerID="ae4c00199f58ad5f92086820dd8ba32ad5b007746d6bbed0ad823b658a98a338" exitCode=0 Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.928318 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerDied","Data":"ae4c00199f58ad5f92086820dd8ba32ad5b007746d6bbed0ad823b658a98a338"} Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.930086 4689 generic.go:334] "Generic (PLEG): container finished" podID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerID="bd0a6af6f2e151ed17842795c8ec421f2fae0323ac1c355fed9f395a8bcadeec" exitCode=0 Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.930123 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerDied","Data":"bd0a6af6f2e151ed17842795c8ec421f2fae0323ac1c355fed9f395a8bcadeec"} Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.930137 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-765b5fc8f6-vfzgp" event={"ID":"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0","Type":"ContainerDied","Data":"a668e06f23f901f765dc183e0f0ee223a075ceb6dbbfff6985b980c544f2f7c5"} Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.930166 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a668e06f23f901f765dc183e0f0ee223a075ceb6dbbfff6985b980c544f2f7c5" Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.945764 4689 generic.go:334] "Generic (PLEG): container finished" podID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerID="cbd0b58973fe413087706d6f684a397fad4571e933d2cf28aeecc0787f330db8" exitCode=0 Jan 23 11:13:21 crc kubenswrapper[4689]: I0123 11:13:21.945844 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerDied","Data":"cbd0b58973fe413087706d6f684a397fad4571e933d2cf28aeecc0787f330db8"} Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.028952 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.095026 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle\") pod \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.095218 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data\") pod \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.095273 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom\") pod \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.095303 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs\") pod \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.095405 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctn5q\" (UniqueName: \"kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q\") pod \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\" (UID: \"c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.105564 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs" (OuterVolumeSpecName: "logs") pod "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" (UID: "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.107387 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q" (OuterVolumeSpecName: "kube-api-access-ctn5q") pod "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" (UID: "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0"). InnerVolumeSpecName "kube-api-access-ctn5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.118328 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" (UID: "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.152228 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" (UID: "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.171589 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.181455 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data" (OuterVolumeSpecName: "config-data") pod "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" (UID: "c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.196962 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.197067 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jwpd\" (UniqueName: \"kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.197084 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.197114 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.197313 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.197389 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom\") pod \"222cb23c-4c24-4038-9ff9-bb39b2df776b\" (UID: \"222cb23c-4c24-4038-9ff9-bb39b2df776b\") " Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198005 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198019 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198027 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-config-data-custom\") 
on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198038 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198046 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctn5q\" (UniqueName: \"kubernetes.io/projected/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0-kube-api-access-ctn5q\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.198586 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.203826 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts" (OuterVolumeSpecName: "scripts") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.205028 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd" (OuterVolumeSpecName: "kube-api-access-9jwpd") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "kube-api-access-9jwpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.206260 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.289014 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.298855 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.298890 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jwpd\" (UniqueName: \"kubernetes.io/projected/222cb23c-4c24-4038-9ff9-bb39b2df776b-kube-api-access-9jwpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.298904 4689 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/222cb23c-4c24-4038-9ff9-bb39b2df776b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.298912 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.298921 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.353535 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data" (OuterVolumeSpecName: "config-data") pod "222cb23c-4c24-4038-9ff9-bb39b2df776b" (UID: "222cb23c-4c24-4038-9ff9-bb39b2df776b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.400820 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222cb23c-4c24-4038-9ff9-bb39b2df776b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.971926 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-765b5fc8f6-vfzgp" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.972997 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.973025 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"222cb23c-4c24-4038-9ff9-bb39b2df776b","Type":"ContainerDied","Data":"c726d0d8ebc75fc5b79b0e1019afc42a2db03855efe2460581f2a8c5e1f09e68"} Jan 23 11:13:22 crc kubenswrapper[4689]: I0123 11:13:22.978204 4689 scope.go:117] "RemoveContainer" containerID="8429437d5f543289a6e642806fc4054b4d9d9cf9c302f1a35f64dcb28a6f7d5e" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.049854 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.060782 4689 scope.go:117] "RemoveContainer" containerID="ae4c00199f58ad5f92086820dd8ba32ad5b007746d6bbed0ad823b658a98a338" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.065064 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.076538 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:23 crc kubenswrapper[4689]: E0123 11:13:23.077048 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api-log" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077066 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api-log" Jan 23 11:13:23 crc kubenswrapper[4689]: E0123 11:13:23.077088 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="probe" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077095 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="probe" Jan 23 11:13:23 crc kubenswrapper[4689]: E0123 11:13:23.077105 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077111 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api" Jan 23 11:13:23 crc kubenswrapper[4689]: E0123 11:13:23.077139 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="cinder-scheduler" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077241 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="cinder-scheduler" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077500 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="cinder-scheduler" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077524 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api-log" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077535 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" containerName="barbican-api" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.077561 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" containerName="probe" Jan 23 
11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.079205 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.081624 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.090278 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.120527 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123114 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123266 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-scripts\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123309 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123331 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123576 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njcw2\" (UniqueName: \"kubernetes.io/projected/543b7065-0dd2-411e-9854-8aaa3e11dd3e-kube-api-access-njcw2\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.123664 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/543b7065-0dd2-411e-9854-8aaa3e11dd3e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.134215 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-765b5fc8f6-vfzgp"] Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.224697 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/543b7065-0dd2-411e-9854-8aaa3e11dd3e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc 
kubenswrapper[4689]: I0123 11:13:23.224776 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.224826 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/543b7065-0dd2-411e-9854-8aaa3e11dd3e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.224874 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-scripts\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.224911 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.225649 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.225766 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njcw2\" (UniqueName: \"kubernetes.io/projected/543b7065-0dd2-411e-9854-8aaa3e11dd3e-kube-api-access-njcw2\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.230178 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-scripts\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.230572 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.231450 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.241059 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543b7065-0dd2-411e-9854-8aaa3e11dd3e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.253038 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njcw2\" (UniqueName: \"kubernetes.io/projected/543b7065-0dd2-411e-9854-8aaa3e11dd3e-kube-api-access-njcw2\") pod \"cinder-scheduler-0\" (UID: \"543b7065-0dd2-411e-9854-8aaa3e11dd3e\") " pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.405313 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.659772 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="222cb23c-4c24-4038-9ff9-bb39b2df776b" path="/var/lib/kubelet/pods/222cb23c-4c24-4038-9ff9-bb39b2df776b/volumes" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.661795 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0" path="/var/lib/kubelet/pods/c5abc6a2-ca3a-4c7f-97ef-bc0510e1bfd0/volumes" Jan 23 11:13:23 crc kubenswrapper[4689]: I0123 11:13:23.839314 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.204:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:13:24 crc kubenswrapper[4689]: I0123 11:13:24.200531 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 11:13:25 crc kubenswrapper[4689]: I0123 11:13:25.012176 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"543b7065-0dd2-411e-9854-8aaa3e11dd3e","Type":"ContainerStarted","Data":"e8cfc975a64bd05f605c67629b1ba167f129cce44025bca8980c259d587c22eb"} Jan 23 11:13:26 crc kubenswrapper[4689]: I0123 11:13:26.038732 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerStarted","Data":"891f6558ae22e860a4ccf2c04da7badfb43b325b51c715478b339598170cfffb"} Jan 23 11:13:26 crc kubenswrapper[4689]: I0123 11:13:26.046136 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"543b7065-0dd2-411e-9854-8aaa3e11dd3e","Type":"ContainerStarted","Data":"d3dd4e2cf5fd0390b09097eb990bae71c1a878f4111c85e60e6bc164711fdefa"} Jan 23 11:13:26 crc kubenswrapper[4689]: I0123 11:13:26.065414 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4tsxl" podStartSLOduration=7.939650752 podStartE2EDuration="13.065391771s" podCreationTimestamp="2026-01-23 11:13:13 +0000 UTC" firstStartedPulling="2026-01-23 11:13:19.866631511 +0000 UTC m=+1464.491311370" lastFinishedPulling="2026-01-23 11:13:24.99237253 +0000 UTC m=+1469.617052389" observedRunningTime="2026-01-23 11:13:26.059683881 +0000 UTC m=+1470.684363750" watchObservedRunningTime="2026-01-23 11:13:26.065391771 +0000 UTC m=+1470.690071630" Jan 23 11:13:26 crc kubenswrapper[4689]: I0123 11:13:26.629723 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.061859 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"543b7065-0dd2-411e-9854-8aaa3e11dd3e","Type":"ContainerStarted","Data":"37d2373ef47c2186e02883e57f62c9daf2aa797b560cc6665b3100fb05d7ce68"} Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.084084 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.084062787 podStartE2EDuration="4.084062787s" podCreationTimestamp="2026-01-23 11:13:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:27.075769949 +0000 UTC m=+1471.700449838" watchObservedRunningTime="2026-01-23 11:13:27.084062787 +0000 UTC m=+1471.708742646" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.284634 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8wtbq" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server" probeResult="failure" output=< Jan 23 11:13:27 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:13:27 crc kubenswrapper[4689]: > Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.951945 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5b54798db9-jfwb5"] Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.953939 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.959132 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.959325 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.964361 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 23 11:13:27 crc kubenswrapper[4689]: I0123 11:13:27.978607 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b54798db9-jfwb5"] Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.121727 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-run-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.121998 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-combined-ca-bundle\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122023 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-public-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122124 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-internal-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122156 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-log-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122260 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-etc-swift\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122308 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5rkx\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-kube-api-access-k5rkx\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.122360 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-config-data\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.224952 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-etc-swift\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225039 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5rkx\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-kube-api-access-k5rkx\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225107 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-config-data\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225195 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-run-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225234 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-combined-ca-bundle\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225268 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-public-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225421 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-internal-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225477 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-log-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225852 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-run-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.225934 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab0d641a-3762-404a-baff-e2026b4a3896-log-httpd\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.232445 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-public-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.233966 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-internal-tls-certs\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.234784 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-combined-ca-bundle\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.247667 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab0d641a-3762-404a-baff-e2026b4a3896-config-data\") pod 
\"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.248299 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-etc-swift\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.248338 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5rkx\" (UniqueName: \"kubernetes.io/projected/ab0d641a-3762-404a-baff-e2026b4a3896-kube-api-access-k5rkx\") pod \"swift-proxy-5b54798db9-jfwb5\" (UID: \"ab0d641a-3762-404a-baff-e2026b4a3896\") " pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.282494 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:28 crc kubenswrapper[4689]: I0123 11:13:28.406285 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.080644 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.081862 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-central-agent" containerID="cri-o://39167b78e5dc85a7dc6e90d361d54de67d464904e50658e2753074ed85c4251a" gracePeriod=30 Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.081961 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" containerID="cri-o://117b0f0e510cb934a9c49a820cbf637412206b7118ff24e3256808c41cbd8a5a" gracePeriod=30 Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.081999 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="sg-core" containerID="cri-o://a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f" gracePeriod=30 Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.082030 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-notification-agent" containerID="cri-o://3e9fc1db92380683fa51bc6c49ac4e2a0291cc36dc99dd591628a5bc795df84c" gracePeriod=30 Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.107943 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.207:3000/\": EOF" Jan 23 11:13:29 crc kubenswrapper[4689]: E0123 11:13:29.177319 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod390af7bf_94c3_41f8_9733_292f15440b98.slice/crio-a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 
11:13:29.355514 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6cf4c786cc-4bmzv" Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.444183 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.444476 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fdbbd7548-jpgxk" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-api" containerID="cri-o://84571097cab79784f9cd491a59409d46cd57299976df5353d6252a8632cee3d2" gracePeriod=30 Jan 23 11:13:29 crc kubenswrapper[4689]: I0123 11:13:29.444631 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fdbbd7548-jpgxk" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-httpd" containerID="cri-o://fa2d601b21d2ee1b57a47a47dcd543ffe7bf1592de35d3457dde31db80efdab0" gracePeriod=30 Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112461 4689 generic.go:334] "Generic (PLEG): container finished" podID="390af7bf-94c3-41f8-9733-292f15440b98" containerID="117b0f0e510cb934a9c49a820cbf637412206b7118ff24e3256808c41cbd8a5a" exitCode=0 Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112491 4689 generic.go:334] "Generic (PLEG): container finished" podID="390af7bf-94c3-41f8-9733-292f15440b98" containerID="a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f" exitCode=2 Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112499 4689 generic.go:334] "Generic (PLEG): container finished" podID="390af7bf-94c3-41f8-9733-292f15440b98" containerID="39167b78e5dc85a7dc6e90d361d54de67d464904e50658e2753074ed85c4251a" exitCode=0 Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112550 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerDied","Data":"117b0f0e510cb934a9c49a820cbf637412206b7118ff24e3256808c41cbd8a5a"} Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112604 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerDied","Data":"a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f"} Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.112618 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerDied","Data":"39167b78e5dc85a7dc6e90d361d54de67d464904e50658e2753074ed85c4251a"} Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.114792 4689 generic.go:334] "Generic (PLEG): container finished" podID="5760eb27-db85-406c-8367-f03313a9a14a" containerID="fa2d601b21d2ee1b57a47a47dcd543ffe7bf1592de35d3457dde31db80efdab0" exitCode=0 Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.114822 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerDied","Data":"fa2d601b21d2ee1b57a47a47dcd543ffe7bf1592de35d3457dde31db80efdab0"} Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.376761 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.378387 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.381323 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.382281 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-5trhz" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.385652 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.394768 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.501589 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.501672 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgwcc\" (UniqueName: \"kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.501695 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.501734 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.517688 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.530054 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.542759 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.605500 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620245 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620337 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v2qh\" (UniqueName: \"kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620431 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620601 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620652 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620709 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620812 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.620920 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: 
\"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.621019 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.621137 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgwcc\" (UniqueName: \"kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.630131 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.636635 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.644505 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.648420 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.677534 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgwcc\" (UniqueName: \"kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.707523 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.710590 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom\") pod \"heat-engine-757d5d5668-vt8hl\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.738900 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.207:3000/\": dial tcp 10.217.0.207:3000: connect: connection refused" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.742616 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.744717 4689 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.746312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v2qh\" (UniqueName: \"kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.747669 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.748726 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.749777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.750615 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.751478 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.750531 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.749715 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.751374 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc 
kubenswrapper[4689]: I0123 11:13:30.748673 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.752216 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.752711 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.767190 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v2qh\" (UniqueName: \"kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh\") pod \"dnsmasq-dns-7d978555f9-bkg4n\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.782228 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859535 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsdlp\" (UniqueName: \"kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859612 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859687 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859717 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859761 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc 
kubenswrapper[4689]: I0123 11:13:30.859817 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859854 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhztb\" (UniqueName: \"kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.859887 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.875185 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.961809 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.962204 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.962775 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.962901 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.962943 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhztb\" (UniqueName: \"kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.962980 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.963039 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsdlp\" (UniqueName: \"kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.963097 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.968391 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.968498 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.971715 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.973881 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.979730 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data\") pod \"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.979904 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.985062 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsdlp\" (UniqueName: \"kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp\") pod 
\"heat-cfnapi-6dbc994bcd-6lt7s\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:30 crc kubenswrapper[4689]: I0123 11:13:30.992813 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhztb\" (UniqueName: \"kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb\") pod \"heat-api-7b8f8c96b8-rjrg7\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:31 crc kubenswrapper[4689]: I0123 11:13:31.003666 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:31 crc kubenswrapper[4689]: I0123 11:13:31.132057 4689 generic.go:334] "Generic (PLEG): container finished" podID="390af7bf-94c3-41f8-9733-292f15440b98" containerID="3e9fc1db92380683fa51bc6c49ac4e2a0291cc36dc99dd591628a5bc795df84c" exitCode=0 Jan 23 11:13:31 crc kubenswrapper[4689]: I0123 11:13:31.132088 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerDied","Data":"3e9fc1db92380683fa51bc6c49ac4e2a0291cc36dc99dd591628a5bc795df84c"} Jan 23 11:13:31 crc kubenswrapper[4689]: I0123 11:13:31.176267 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:31 crc kubenswrapper[4689]: I0123 11:13:31.190791 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:33 crc kubenswrapper[4689]: I0123 11:13:33.311061 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:13:33 crc kubenswrapper[4689]: I0123 11:13:33.311456 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:13:33 crc kubenswrapper[4689]: I0123 11:13:33.809874 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 23 11:13:34 crc kubenswrapper[4689]: I0123 11:13:34.291450 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:34 crc kubenswrapper[4689]: I0123 11:13:34.291813 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:34 crc kubenswrapper[4689]: I0123 11:13:34.373902 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:35 crc kubenswrapper[4689]: I0123 11:13:35.193693 4689 generic.go:334] "Generic (PLEG): container finished" podID="5760eb27-db85-406c-8367-f03313a9a14a" containerID="84571097cab79784f9cd491a59409d46cd57299976df5353d6252a8632cee3d2" exitCode=0 Jan 23 11:13:35 crc kubenswrapper[4689]: I0123 11:13:35.193811 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" 
event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerDied","Data":"84571097cab79784f9cd491a59409d46cd57299976df5353d6252a8632cee3d2"} Jan 23 11:13:35 crc kubenswrapper[4689]: I0123 11:13:35.258579 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:35 crc kubenswrapper[4689]: I0123 11:13:35.322256 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.211576 4689 generic.go:334] "Generic (PLEG): container finished" podID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerID="7709aaf5e05b9e11f2be156ecffb936e2cc185524162a8d1d36ca3d487290d9b" exitCode=137 Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.211651 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerDied","Data":"7709aaf5e05b9e11f2be156ecffb936e2cc185524162a8d1d36ca3d487290d9b"} Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.468986 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-r9b2p"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.470767 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.504788 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-r9b2p"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.596633 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-7hcxl"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.599590 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.637269 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f707-account-create-update-w6nbh"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.638780 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.642515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.642640 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84trr\" (UniqueName: \"kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.645588 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.677265 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7hcxl"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.694122 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f707-account-create-update-w6nbh"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.748395 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fthlg\" (UniqueName: \"kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.748873 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84trr\" (UniqueName: \"kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.748902 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.749227 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bs6q\" (UniqueName: \"kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.749295 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: 
I0123 11:13:36.749433 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.750052 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.782309 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84trr\" (UniqueName: \"kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr\") pod \"nova-api-db-create-r9b2p\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.822104 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-c4d1-account-create-update-9rq2z"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.830629 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.843414 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.847622 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.855976 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bs6q\" (UniqueName: \"kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.856046 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.856156 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fthlg\" (UniqueName: \"kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.856262 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.857098 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.857860 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.869473 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-cnrbr"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.879963 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bs6q\" (UniqueName: \"kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q\") pod \"nova-cell0-db-create-7hcxl\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.898591 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.899177 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fthlg\" (UniqueName: \"kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg\") pod \"nova-api-f707-account-create-update-w6nbh\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") " pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.900308 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c4d1-account-create-update-9rq2z"] Jan 23 11:13:36 crc kubenswrapper[4689]: I0123 11:13:36.919652 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-cnrbr"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.960669 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.961553 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.964949 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdltt\" (UniqueName: \"kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.965250 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbk5q\" (UniqueName: \"kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.969222 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.989910 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f707-account-create-update-w6nbh" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.995185 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-f6ce-account-create-update-6j84v"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:36.997077 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.003645 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.024607 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f6ce-account-create-update-6j84v"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067464 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067569 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpwzk\" (UniqueName: \"kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067598 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdltt\" (UniqueName: \"kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067660 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbk5q\" (UniqueName: \"kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067696 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: 
\"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.067751 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.068502 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.069095 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.091283 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdltt\" (UniqueName: \"kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt\") pod \"nova-cell1-db-create-cnrbr\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.094573 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbk5q\" (UniqueName: \"kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q\") pod \"nova-cell0-c4d1-account-create-update-9rq2z\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.149567 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.170478 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpwzk\" (UniqueName: \"kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.170626 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.171643 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.205431 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpwzk\" (UniqueName: \"kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk\") pod \"nova-cell1-f6ce-account-create-update-6j84v\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.252485 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.253048 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.278615 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.278733 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56cjj\" (UniqueName: \"kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.278788 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.280006 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.280044 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.280140 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.280295 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd\") pod \"390af7bf-94c3-41f8-9733-292f15440b98\" (UID: \"390af7bf-94c3-41f8-9733-292f15440b98\") " Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.281283 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.282924 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.283318 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.299805 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts" (OuterVolumeSpecName: "scripts") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.300210 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4tsxl" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="registry-server" containerID="cri-o://891f6558ae22e860a4ccf2c04da7badfb43b325b51c715478b339598170cfffb" gracePeriod=2 Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.300327 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.300853 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390af7bf-94c3-41f8-9733-292f15440b98","Type":"ContainerDied","Data":"ace5f5b3e9584358e0745b05b2206e68852cdf7fa48a37b0c5434b884092cf1b"} Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.300881 4689 scope.go:117] "RemoveContainer" containerID="117b0f0e510cb934a9c49a820cbf637412206b7118ff24e3256808c41cbd8a5a" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.303490 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8wtbq" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server" probeResult="failure" output=< Jan 23 11:13:37 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:13:37 crc kubenswrapper[4689]: > Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.342537 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj" (OuterVolumeSpecName: "kube-api-access-56cjj") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "kube-api-access-56cjj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.376048 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:37 crc kubenswrapper[4689]: E0123 11:13:37.376721 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.376746 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" Jan 23 11:13:37 crc kubenswrapper[4689]: E0123 11:13:37.376770 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="sg-core" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.376778 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="sg-core" Jan 23 11:13:37 crc kubenswrapper[4689]: E0123 11:13:37.376792 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-central-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.376800 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-central-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: E0123 11:13:37.376808 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-notification-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.376818 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-notification-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.377129 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="sg-core" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.377167 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-notification-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.377195 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="ceilometer-central-agent" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.377215 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="390af7bf-94c3-41f8-9733-292f15440b98" containerName="proxy-httpd" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.389752 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.391465 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.393429 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.393453 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390af7bf-94c3-41f8-9733-292f15440b98-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.393461 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56cjj\" (UniqueName: \"kubernetes.io/projected/390af7bf-94c3-41f8-9733-292f15440b98-kube-api-access-56cjj\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.393605 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.405072 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.435371 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.437381 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.450598 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.450988 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.483924 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522182 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522237 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzckj\" (UniqueName: \"kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522271 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522307 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522333 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9z7f\" (UniqueName: \"kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522519 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522556 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522586 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle\") pod \"heat-api-99bf8995f-hqj6x\" (UID: 
\"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.522986 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.580205 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.614910 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.615161 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-log" containerID="cri-o://081eeeb029b6dbd702c8ac6a4c6e88d6920f2af25d4372c65eeee1cf3b685ef5" gracePeriod=30 Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.616622 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-httpd" containerID="cri-o://c6478ad3e2e904d66aa12366c2d5a0e63131e29f36f036f4d6fb19aff8730df9" gracePeriod=30 Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.626650 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.626742 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.626849 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.626894 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.626930 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627050 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627079 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzckj\" (UniqueName: \"kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627112 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627166 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627197 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9z7f\" (UniqueName: \"kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627223 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skq7h\" (UniqueName: \"kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.627281 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.649492 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.683919 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9z7f\" (UniqueName: \"kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.696324 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.698615 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.698713 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.699379 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.706771 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.708083 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data\") pod \"heat-cfnapi-67ddbdd968-r44lr\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") " pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.709424 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzckj\" (UniqueName: \"kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj\") pod \"heat-api-99bf8995f-hqj6x\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.729784 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skq7h\" (UniqueName: \"kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.729886 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.729958 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.730018 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.730213 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.779772 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.784728 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data" (OuterVolumeSpecName: "config-data") pod "390af7bf-94c3-41f8-9733-292f15440b98" (UID: "390af7bf-94c3-41f8-9733-292f15440b98"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.785929 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.791264 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.793475 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.811295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skq7h\" (UniqueName: \"kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.817274 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle\") pod \"heat-engine-6b6f6df6f9-dvwpc\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.832162 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390af7bf-94c3-41f8-9733-292f15440b98-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.862528 4689 scope.go:117] "RemoveContainer" containerID="a505d2c1b1c494036dc000a78ebf1686e8a39ff058cd2a281348af2779d07f3f" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.915493 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:13:37 crc kubenswrapper[4689]: I0123 11:13:37.942222 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055330 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055732 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqkjn\" (UniqueName: \"kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055765 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055885 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs\") pod \"5760eb27-db85-406c-8367-f03313a9a14a\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055919 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config\") pod \"5760eb27-db85-406c-8367-f03313a9a14a\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055950 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.055976 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle\") pod \"5760eb27-db85-406c-8367-f03313a9a14a\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.056028 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.056044 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config\") pod \"5760eb27-db85-406c-8367-f03313a9a14a\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.056256 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqk5n\" (UniqueName: \"kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n\") pod \"5760eb27-db85-406c-8367-f03313a9a14a\" (UID: \"5760eb27-db85-406c-8367-f03313a9a14a\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.056295 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.056334 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle\") pod \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\" (UID: \"a11f1dc5-6e97-4344-8ecd-3c2537251ac0\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.057119 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs" (OuterVolumeSpecName: "logs") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.063318 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.070882 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts" (OuterVolumeSpecName: "scripts") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.077453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n" (OuterVolumeSpecName: "kube-api-access-mqk5n") pod "5760eb27-db85-406c-8367-f03313a9a14a" (UID: "5760eb27-db85-406c-8367-f03313a9a14a"). InnerVolumeSpecName "kube-api-access-mqk5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.077823 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.080375 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "5760eb27-db85-406c-8367-f03313a9a14a" (UID: "5760eb27-db85-406c-8367-f03313a9a14a"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.085858 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.085928 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn" (OuterVolumeSpecName: "kube-api-access-fqkjn") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "kube-api-access-fqkjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.096837 4689 scope.go:117] "RemoveContainer" containerID="3e9fc1db92380683fa51bc6c49ac4e2a0291cc36dc99dd591628a5bc795df84c" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.104494 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.115517 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.151039 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.154009 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.156452 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api-log" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.156491 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api-log" Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.156511 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-httpd" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.156518 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-httpd" Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.156529 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.156536 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.156572 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-api" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.156578 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-api" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.157067 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-httpd" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.157094 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.157111 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5760eb27-db85-406c-8367-f03313a9a14a" containerName="neutron-api" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.157170 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" containerName="cinder-api-log" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.159721 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.169665 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.170321 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175078 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqk5n\" (UniqueName: \"kubernetes.io/projected/5760eb27-db85-406c-8367-f03313a9a14a-kube-api-access-mqk5n\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175108 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175127 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175137 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175162 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqkjn\" (UniqueName: \"kubernetes.io/projected/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-kube-api-access-fqkjn\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175171 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175185 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.175194 4689 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.196897 4689 scope.go:117] "RemoveContainer" containerID="39167b78e5dc85a7dc6e90d361d54de67d464904e50658e2753074ed85c4251a" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.224467 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.255500 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data" (OuterVolumeSpecName: "config-data") pod "a11f1dc5-6e97-4344-8ecd-3c2537251ac0" (UID: "a11f1dc5-6e97-4344-8ecd-3c2537251ac0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.277728 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.278059 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.278093 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.278120 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.282314 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config" (OuterVolumeSpecName: "config") pod "5760eb27-db85-406c-8367-f03313a9a14a" (UID: "5760eb27-db85-406c-8367-f03313a9a14a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.286380 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zdqx\" (UniqueName: \"kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.286462 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.286713 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.286938 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.286968 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a11f1dc5-6e97-4344-8ecd-3c2537251ac0-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.333453 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5760eb27-db85-406c-8367-f03313a9a14a" (UID: "5760eb27-db85-406c-8367-f03313a9a14a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.339728 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b54798db9-jfwb5"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.350454 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fdbbd7548-jpgxk" event={"ID":"5760eb27-db85-406c-8367-f03313a9a14a","Type":"ContainerDied","Data":"0058ef1f42fc1f29f7df96b88f31189ae7bbc0cac96875f1a3fcfb593a580c34"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.350518 4689 scope.go:117] "RemoveContainer" containerID="fa2d601b21d2ee1b57a47a47dcd543ffe7bf1592de35d3457dde31db80efdab0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.350679 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fdbbd7548-jpgxk" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.368248 4689 generic.go:334] "Generic (PLEG): container finished" podID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerID="081eeeb029b6dbd702c8ac6a4c6e88d6920f2af25d4372c65eeee1cf3b685ef5" exitCode=143 Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.368313 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerDied","Data":"081eeeb029b6dbd702c8ac6a4c6e88d6920f2af25d4372c65eeee1cf3b685ef5"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392116 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392201 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392287 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zdqx\" (UniqueName: \"kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392324 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392502 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392669 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392692 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392806 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.392805 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.394977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.405027 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a11f1dc5-6e97-4344-8ecd-3c2537251ac0","Type":"ContainerDied","Data":"793d51d94e45fc7eba9da7e070c38542458a4c63ce35ca7785baf76991428aaf"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.405164 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.412247 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.415545 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.416867 4689 generic.go:334] "Generic (PLEG): container finished" podID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerID="891f6558ae22e860a4ccf2c04da7badfb43b325b51c715478b339598170cfffb" exitCode=0 Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.416924 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerDied","Data":"891f6558ae22e860a4ccf2c04da7badfb43b325b51c715478b339598170cfffb"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.423893 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.433071 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b54798db9-jfwb5" event={"ID":"ab0d641a-3762-404a-baff-e2026b4a3896","Type":"ContainerStarted","Data":"da3f4e667e7a673e1c048afb62dd41e78c188c3fa74b52b48c03f99f0d5d51a8"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.440475 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "5760eb27-db85-406c-8367-f03313a9a14a" (UID: "5760eb27-db85-406c-8367-f03313a9a14a"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.441226 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.459657 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1a99db63-bd38-464f-b9f9-31bc662fb39d","Type":"ContainerStarted","Data":"356e9c475428e21cfbfe7e46bcda3ec10a69d7520bf84243ae622d4860b98ed8"} Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.460251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zdqx\" (UniqueName: \"kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx\") pod \"ceilometer-0\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.488567 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.630528816 podStartE2EDuration="20.488552397s" podCreationTimestamp="2026-01-23 11:13:18 +0000 UTC" firstStartedPulling="2026-01-23 11:13:19.720050217 +0000 UTC m=+1464.344730076" lastFinishedPulling="2026-01-23 11:13:36.578073748 +0000 UTC m=+1481.202753657" observedRunningTime="2026-01-23 11:13:38.482502907 +0000 UTC m=+1483.107182776" watchObservedRunningTime="2026-01-23 11:13:38.488552397 +0000 UTC m=+1483.113232256" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.490112 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.495141 4689 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5760eb27-db85-406c-8367-f03313a9a14a-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.654352 4689 scope.go:117] "RemoveContainer" containerID="84571097cab79784f9cd491a59409d46cd57299976df5353d6252a8632cee3d2" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.715950 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.717276 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.725479 4689 scope.go:117] "RemoveContainer" containerID="7709aaf5e05b9e11f2be156ecffb936e2cc185524162a8d1d36ca3d487290d9b" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.738135 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.749896 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.750426 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="registry-server" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.750444 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="registry-server" Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.750464 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="extract-utilities" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.750470 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="extract-utilities" Jan 23 11:13:38 crc kubenswrapper[4689]: E0123 11:13:38.750507 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="extract-content" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.750513 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="extract-content" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.750748 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" containerName="registry-server" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.752008 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.761678 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.772408 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.772615 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.772838 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.789431 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.801185 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5fdbbd7548-jpgxk"] Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.803398 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8ldr\" (UniqueName: \"kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr\") pod \"af994331-2ca0-45e0-ab37-ea1de382cf8d\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.803614 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content\") pod \"af994331-2ca0-45e0-ab37-ea1de382cf8d\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.803739 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities\") pod \"af994331-2ca0-45e0-ab37-ea1de382cf8d\" (UID: \"af994331-2ca0-45e0-ab37-ea1de382cf8d\") " Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.808113 4689 scope.go:117] "RemoveContainer" containerID="79ffea6f359015be6b20d789b26088cd9590978a1bcf790408b33ff8bb8e8d81" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.813338 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr" (OuterVolumeSpecName: "kube-api-access-v8ldr") pod "af994331-2ca0-45e0-ab37-ea1de382cf8d" (UID: "af994331-2ca0-45e0-ab37-ea1de382cf8d"). InnerVolumeSpecName "kube-api-access-v8ldr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.828918 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities" (OuterVolumeSpecName: "utilities") pod "af994331-2ca0-45e0-ab37-ea1de382cf8d" (UID: "af994331-2ca0-45e0-ab37-ea1de382cf8d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.876850 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af994331-2ca0-45e0-ab37-ea1de382cf8d" (UID: "af994331-2ca0-45e0-ab37-ea1de382cf8d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905768 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905809 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905848 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21f5e336-9a50-43ce-8816-46552dcc4b43-logs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905893 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905928 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-public-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905950 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-scripts\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.905999 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.906021 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mv8g\" (UniqueName: \"kubernetes.io/projected/21f5e336-9a50-43ce-8816-46552dcc4b43-kube-api-access-8mv8g\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.906127 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21f5e336-9a50-43ce-8816-46552dcc4b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.906206 4689 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-v8ldr\" (UniqueName: \"kubernetes.io/projected/af994331-2ca0-45e0-ab37-ea1de382cf8d-kube-api-access-v8ldr\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.906221 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:38 crc kubenswrapper[4689]: I0123 11:13:38.906232 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af994331-2ca0-45e0-ab37-ea1de382cf8d-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010669 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010712 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010756 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21f5e336-9a50-43ce-8816-46552dcc4b43-logs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010800 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010835 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-public-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010861 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-scripts\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010909 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.010932 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mv8g\" (UniqueName: \"kubernetes.io/projected/21f5e336-9a50-43ce-8816-46552dcc4b43-kube-api-access-8mv8g\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc 
kubenswrapper[4689]: I0123 11:13:39.011029 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21f5e336-9a50-43ce-8816-46552dcc4b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.011130 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/21f5e336-9a50-43ce-8816-46552dcc4b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.012093 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21f5e336-9a50-43ce-8816-46552dcc4b43-logs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.015792 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.028525 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.029991 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-public-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.030780 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-scripts\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.039020 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-config-data\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.039737 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21f5e336-9a50-43ce-8816-46552dcc4b43-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.048375 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mv8g\" (UniqueName: \"kubernetes.io/projected/21f5e336-9a50-43ce-8816-46552dcc4b43-kube-api-access-8mv8g\") pod \"cinder-api-0\" (UID: \"21f5e336-9a50-43ce-8816-46552dcc4b43\") " pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.048478 4689 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.069277 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"] Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.081188 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:13:39 crc kubenswrapper[4689]: W0123 11:13:39.092406 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49fd7217_901d_4fa1_b3f2_2a883295cf83.slice/crio-00404ea4f88fa1626b109e4b15b31cc60fe12875bf0bfcbdcae405535a8d5dfb WatchSource:0}: Error finding container 00404ea4f88fa1626b109e4b15b31cc60fe12875bf0bfcbdcae405535a8d5dfb: Status 404 returned error can't find the container with id 00404ea4f88fa1626b109e4b15b31cc60fe12875bf0bfcbdcae405535a8d5dfb Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.092478 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-r9b2p"] Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.097681 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.106618 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:13:39 crc kubenswrapper[4689]: W0123 11:13:39.138833 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a91b673_3b20_4718_91f0_695225bc7f82.slice/crio-c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb WatchSource:0}: Error finding container c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb: Status 404 returned error can't find the container with id c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.254775 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f707-account-create-update-w6nbh"] Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.292856 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7hcxl"] Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.569339 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4tsxl" event={"ID":"af994331-2ca0-45e0-ab37-ea1de382cf8d","Type":"ContainerDied","Data":"6674314a2cbba6d08f3f89aab49e91e1431027ab2238d67c96f3a676809bf905"} Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.569604 4689 scope.go:117] "RemoveContainer" containerID="891f6558ae22e860a4ccf2c04da7badfb43b325b51c715478b339598170cfffb" Jan 23 11:13:39 crc kubenswrapper[4689]: I0123 11:13:39.569714 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4tsxl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:39.946885 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5b54798db9-jfwb5" podStartSLOduration=12.946859941 podStartE2EDuration="12.946859941s" podCreationTimestamp="2026-01-23 11:13:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:39.879004111 +0000 UTC m=+1484.503683970" watchObservedRunningTime="2026-01-23 11:13:39.946859941 +0000 UTC m=+1484.571539800" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.049704 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="390af7bf-94c3-41f8-9733-292f15440b98" path="/var/lib/kubelet/pods/390af7bf-94c3-41f8-9733-292f15440b98/volumes" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.051201 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5760eb27-db85-406c-8367-f03313a9a14a" path="/var/lib/kubelet/pods/5760eb27-db85-406c-8367-f03313a9a14a/volumes" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.053622 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a11f1dc5-6e97-4344-8ecd-3c2537251ac0" path="/var/lib/kubelet/pods/a11f1dc5-6e97-4344-8ecd-3c2537251ac0/volumes" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062131 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" event={"ID":"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb","Type":"ContainerStarted","Data":"6d6b7229f90af3194b531b2a5b9afcc44509530d68fd00fa602f267cd6693eb8"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062220 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062240 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062256 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c4d1-account-create-update-9rq2z"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062273 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f6ce-account-create-update-6j84v"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062285 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-r9b2p" event={"ID":"2a91b673-3b20-4718-91f0-695225bc7f82","Type":"ContainerStarted","Data":"c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062298 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-cnrbr"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062310 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-757d5d5668-vt8hl" event={"ID":"49fd7217-901d-4fa1-b3f2-2a883295cf83","Type":"ContainerStarted","Data":"00404ea4f88fa1626b109e4b15b31cc60fe12875bf0bfcbdcae405535a8d5dfb"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062321 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062331 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-f707-account-create-update-w6nbh" event={"ID":"3cde63b4-6033-4530-884d-55d2274538c5","Type":"ContainerStarted","Data":"c2b459f9b55bac1b8255af89d3ea8b1d6b81ae6b09e41ca02f10f071eef4936c"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062342 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b54798db9-jfwb5" event={"ID":"ab0d641a-3762-404a-baff-e2026b4a3896","Type":"ContainerStarted","Data":"266ebc810b1ef1d24651e3d759e066a3cf2cd6376922e7920b9950d328b1f0a0"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062352 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b54798db9-jfwb5" event={"ID":"ab0d641a-3762-404a-baff-e2026b4a3896","Type":"ContainerStarted","Data":"8acd6a691ca0cb6ea5d7313243c652bea99f7f98950d96ed1d561c22918d05de"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062361 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" event={"ID":"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b","Type":"ContainerStarted","Data":"7845099d8b66a18e2445cc4bdd1723b0fe617f0b1b9da9db6647b83f0237e5b7"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062373 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7hcxl" event={"ID":"d4348edb-5864-4652-9e63-b2c452905118","Type":"ContainerStarted","Data":"09f7272790fa431f1efbd84ef2a0132688a388ebfdc9beddae0f48959d847e09"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.062383 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8f8c96b8-rjrg7" event={"ID":"b57b3805-c785-43e0-a8f2-6bab72916aa4","Type":"ContainerStarted","Data":"2fbb2b60d11819e4c9949c8ad4a6434b0c6b806dd6206a21648bf0306cf503ee"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.077115 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.094701 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.112697 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.159550 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.373960 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.393602 4689 scope.go:117] "RemoveContainer" containerID="cbd0b58973fe413087706d6f684a397fad4571e933d2cf28aeecc0787f330db8" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.407879 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.444159 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.451558 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.454381 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.454570 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.454672 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.459869 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.464084 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.464344 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.465917 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"] Jan 23 11:13:40 crc kubenswrapper[4689]: E0123 11:13:40.490813 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf994331_2ca0_45e0_ab37_ea1de382cf8d.slice/crio-6674314a2cbba6d08f3f89aab49e91e1431027ab2238d67c96f3a676809bf905\": RecentStats: unable to find data in memory cache]" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.491777 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.524458 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.544738 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4tsxl"] Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.552798 4689 scope.go:117] "RemoveContainer" containerID="38efb61dd2bff4f8d6bb484e978c4d26dc53f0d9d0bfb6076d71667604bf5c08" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629048 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfnhn\" (UniqueName: \"kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629116 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629214 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data-custom\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " 
pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629314 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629369 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629392 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629466 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629502 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629530 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629569 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vt5q\" (UniqueName: \"kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629601 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.629654 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.754865 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.759139 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.760657 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfnhn\" (UniqueName: \"kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.760819 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.761078 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data-custom\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.761422 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.768775 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.770824 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.772202 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data-custom\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.772554 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.772636 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.772684 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.772755 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vt5q\" (UniqueName: \"kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.779236 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.788651 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.788927 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.791920 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.795781 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vt5q\" (UniqueName: \"kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q\") pod 
\"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.806900 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.807430 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.807713 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.809613 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfnhn\" (UniqueName: \"kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.810526 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs\") pod \"heat-api-5fbcd48894-jz4gg\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") " pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.814032 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs\") pod \"heat-cfnapi-ffbc4d8cf-mv5hl\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") " pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.927944 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-cnrbr" event={"ID":"9f3f3ef3-0b10-4d91-8147-3ea51947dc78","Type":"ContainerStarted","Data":"dbb63de3c26be396b137942e59c85a669b09ac7215bfe4aedd0ab92ff5715222"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.927995 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-cnrbr" event={"ID":"9f3f3ef3-0b10-4d91-8147-3ea51947dc78","Type":"ContainerStarted","Data":"8e907656cdb1dafd44eb89fd8351be2e0dc9bc0b84780a511fa98f76c7d50c6b"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.929677 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" event={"ID":"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2","Type":"ContainerStarted","Data":"f4c1305b3821edd3c03ccce906705f7bd693bfa22f66a2ef226cbb4bb9015f64"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.937275 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-99bf8995f-hqj6x" 
event={"ID":"e81e6348-c4b6-4601-9ad2-1df770e175a6","Type":"ContainerStarted","Data":"028e67a89836969a70943b33c2747d51d516dc31febeb8e2282d6a1e8d46b54f"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.939077 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" event={"ID":"1b25887e-9a4b-4100-b672-2d46c34cf1e0","Type":"ContainerStarted","Data":"d2489c9be2b7e472358d427003bbd9185b9d65005f8a8b18b153955726f6daa8"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.945946 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-cnrbr" podStartSLOduration=4.945931693 podStartE2EDuration="4.945931693s" podCreationTimestamp="2026-01-23 11:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:40.941974944 +0000 UTC m=+1485.566654803" watchObservedRunningTime="2026-01-23 11:13:40.945931693 +0000 UTC m=+1485.570611552" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.946774 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7hcxl" event={"ID":"d4348edb-5864-4652-9e63-b2c452905118","Type":"ContainerStarted","Data":"cc9c8c96062815e530ed52a1f36964b78b7f414e15de42a2b52070d1c59cc6fa"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.951044 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-757d5d5668-vt8hl" event={"ID":"49fd7217-901d-4fa1-b3f2-2a883295cf83","Type":"ContainerStarted","Data":"8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.952046 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.963163 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-r9b2p" event={"ID":"2a91b673-3b20-4718-91f0-695225bc7f82","Type":"ContainerStarted","Data":"6987a4d4001346ee6eb2af00615ecf5892d3675072e34ffd2645b894d0937017"} Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.963911 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-7hcxl" podStartSLOduration=4.96390029 podStartE2EDuration="4.96390029s" podCreationTimestamp="2026-01-23 11:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:40.957649314 +0000 UTC m=+1485.582329173" watchObservedRunningTime="2026-01-23 11:13:40.96390029 +0000 UTC m=+1485.588580149" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.985813 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-757d5d5668-vt8hl" podStartSLOduration=10.985797126 podStartE2EDuration="10.985797126s" podCreationTimestamp="2026-01-23 11:13:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:40.975664163 +0000 UTC m=+1485.600344022" watchObservedRunningTime="2026-01-23 11:13:40.985797126 +0000 UTC m=+1485.610476985" Jan 23 11:13:40 crc kubenswrapper[4689]: I0123 11:13:40.988603 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" 
event={"ID":"aa54a9d4-e837-442f-9d18-5e7b0a05e807","Type":"ContainerStarted","Data":"6a9438c1d04252e4663af9cc2311c89f3f2d4afe29396ec1aebc8b47f77692c6"} Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.015941 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerStarted","Data":"5576436c35e1ce064eac63b39a286ec0e7504ddb731908d102b29c4f3243dd5e"} Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.024186 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerStarted","Data":"f7430e36ead9927afa8e977b6f5426473c64a103e3ac5502ef3ed0a26a67f27e"} Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.027636 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"21f5e336-9a50-43ce-8816-46552dcc4b43","Type":"ContainerStarted","Data":"6af7d717b40e136ef2c68a4430378094fed38043c99fa15262d860b78f8305d6"} Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.290608 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.300707 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:13:41 crc kubenswrapper[4689]: I0123 11:13:41.674034 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af994331-2ca0-45e0-ab37-ea1de382cf8d" path="/var/lib/kubelet/pods/af994331-2ca0-45e0-ab37-ea1de382cf8d/volumes" Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.055184 4689 generic.go:334] "Generic (PLEG): container finished" podID="d4348edb-5864-4652-9e63-b2c452905118" containerID="cc9c8c96062815e530ed52a1f36964b78b7f414e15de42a2b52070d1c59cc6fa" exitCode=0 Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.055302 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7hcxl" event={"ID":"d4348edb-5864-4652-9e63-b2c452905118","Type":"ContainerDied","Data":"cc9c8c96062815e530ed52a1f36964b78b7f414e15de42a2b52070d1c59cc6fa"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.059127 4689 generic.go:334] "Generic (PLEG): container finished" podID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerID="c6478ad3e2e904d66aa12366c2d5a0e63131e29f36f036f4d6fb19aff8730df9" exitCode=0 Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.059281 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerDied","Data":"c6478ad3e2e904d66aa12366c2d5a0e63131e29f36f036f4d6fb19aff8730df9"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.065746 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f707-account-create-update-w6nbh" event={"ID":"3cde63b4-6033-4530-884d-55d2274538c5","Type":"ContainerStarted","Data":"1c42ba5d5820ff524a755e99c8a19d0b72c310815da77fe48ecaffb373f3cc9a"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.101740 4689 generic.go:334] "Generic (PLEG): container finished" podID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerID="cc197ab7b0e78dffca3a2549bb5f38538c3bd4df8d00abf5259344da36c8f7ba" exitCode=0 Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.101850 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" event={"ID":"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb","Type":"ContainerDied","Data":"cc197ab7b0e78dffca3a2549bb5f38538c3bd4df8d00abf5259344da36c8f7ba"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.104201 4689 generic.go:334] "Generic (PLEG): container finished" podID="9f3f3ef3-0b10-4d91-8147-3ea51947dc78" containerID="dbb63de3c26be396b137942e59c85a669b09ac7215bfe4aedd0ab92ff5715222" exitCode=0 Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.104278 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-cnrbr" event={"ID":"9f3f3ef3-0b10-4d91-8147-3ea51947dc78","Type":"ContainerDied","Data":"dbb63de3c26be396b137942e59c85a669b09ac7215bfe4aedd0ab92ff5715222"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.116288 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" event={"ID":"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2","Type":"ContainerStarted","Data":"a2d518ab19a834f9b0667f069e1a0c90ff48526c26098f724789fccd3c1f9f82"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.116547 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-f707-account-create-update-w6nbh" podStartSLOduration=6.116524488 podStartE2EDuration="6.116524488s" podCreationTimestamp="2026-01-23 11:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:42.088409577 +0000 UTC m=+1486.713089436" watchObservedRunningTime="2026-01-23 11:13:42.116524488 +0000 UTC m=+1486.741204347" Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.119641 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" event={"ID":"1b25887e-9a4b-4100-b672-2d46c34cf1e0","Type":"ContainerStarted","Data":"d1df398695aeb7ef490b25b64a8d073b39e1ae879be5ef454a2f3d5631247c83"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.131762 4689 generic.go:334] "Generic (PLEG): container finished" podID="2a91b673-3b20-4718-91f0-695225bc7f82" containerID="6987a4d4001346ee6eb2af00615ecf5892d3675072e34ffd2645b894d0937017" exitCode=0 Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.131857 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-r9b2p" event={"ID":"2a91b673-3b20-4718-91f0-695225bc7f82","Type":"ContainerDied","Data":"6987a4d4001346ee6eb2af00615ecf5892d3675072e34ffd2645b894d0937017"} Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.170247 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"] Jan 23 11:13:42 crc kubenswrapper[4689]: I0123 11:13:42.342097 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"] Jan 23 11:13:43 crc kubenswrapper[4689]: I0123 11:13:43.176342 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" podStartSLOduration=7.176323533 podStartE2EDuration="7.176323533s" podCreationTimestamp="2026-01-23 11:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:43.166306762 +0000 UTC m=+1487.790986631" watchObservedRunningTime="2026-01-23 11:13:43.176323533 +0000 UTC m=+1487.801003402" Jan 23 11:13:43 crc kubenswrapper[4689]: I0123 11:13:43.203711 4689 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" podStartSLOduration=7.203688764 podStartE2EDuration="7.203688764s" podCreationTimestamp="2026-01-23 11:13:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:43.201729905 +0000 UTC m=+1487.826409764" watchObservedRunningTime="2026-01-23 11:13:43.203688764 +0000 UTC m=+1487.828368623" Jan 23 11:13:43 crc kubenswrapper[4689]: I0123 11:13:43.308771 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b54798db9-jfwb5" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.025622 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.026087 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-log" containerID="cri-o://0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635" gracePeriod=30 Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.026538 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-httpd" containerID="cri-o://b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9" gracePeriod=30 Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.104177 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.106091 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.113479 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.130082 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.198602 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"21f5e336-9a50-43ce-8816-46552dcc4b43","Type":"ContainerStarted","Data":"2abb6b12ba254e2dd94da0264a1df05b41b5c35a5d84ce5d205619c25b5fbb04"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.207889 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7hcxl" event={"ID":"d4348edb-5864-4652-9e63-b2c452905118","Type":"ContainerDied","Data":"09f7272790fa431f1efbd84ef2a0132688a388ebfdc9beddae0f48959d847e09"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.207937 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09f7272790fa431f1efbd84ef2a0132688a388ebfdc9beddae0f48959d847e09" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.207992 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7hcxl" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.220040 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" event={"ID":"aa54a9d4-e837-442f-9d18-5e7b0a05e807","Type":"ContainerStarted","Data":"5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.221418 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.226682 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" event={"ID":"4b102ee6-4978-45ed-9026-94e1c433d3f6","Type":"ContainerStarted","Data":"1e802ff88d1e2f8a5ae85c05e71005444ec3f63509b5692cfd714a6a0a4c0a37"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.232843 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-cnrbr" event={"ID":"9f3f3ef3-0b10-4d91-8147-3ea51947dc78","Type":"ContainerDied","Data":"8e907656cdb1dafd44eb89fd8351be2e0dc9bc0b84780a511fa98f76c7d50c6b"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.232882 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e907656cdb1dafd44eb89fd8351be2e0dc9bc0b84780a511fa98f76c7d50c6b" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.232938 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-cnrbr" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.243017 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" podStartSLOduration=7.242999838 podStartE2EDuration="7.242999838s" podCreationTimestamp="2026-01-23 11:13:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:44.240363312 +0000 UTC m=+1488.865043171" watchObservedRunningTime="2026-01-23 11:13:44.242999838 +0000 UTC m=+1488.867679697" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.244228 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fbcd48894-jz4gg" event={"ID":"cd9d1a8d-fd0e-4155-8085-5584c456cecb","Type":"ContainerStarted","Data":"876be0fa5f4603f63ab2d41573c7aca0b4a89ad6c9a521ae65f93defcdf5fd92"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.249763 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-r9b2p" event={"ID":"2a91b673-3b20-4718-91f0-695225bc7f82","Type":"ContainerDied","Data":"c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.249798 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c37a13a38d3b3e349c568b9eb7560c71768961aae3d4b9c4fac8c9d4c5c9d0cb" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.249852 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-r9b2p" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.257416 4689 generic.go:334] "Generic (PLEG): container finished" podID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerID="0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635" exitCode=143 Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.257478 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerDied","Data":"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.267523 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerStarted","Data":"45fa74d7d96fc685ea6c478266a107679b854da356b20df54da7fea5a4153538"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.272932 4689 generic.go:334] "Generic (PLEG): container finished" podID="1b25887e-9a4b-4100-b672-2d46c34cf1e0" containerID="d1df398695aeb7ef490b25b64a8d073b39e1ae879be5ef454a2f3d5631247c83" exitCode=0 Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.272992 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" event={"ID":"1b25887e-9a4b-4100-b672-2d46c34cf1e0","Type":"ContainerDied","Data":"d1df398695aeb7ef490b25b64a8d073b39e1ae879be5ef454a2f3d5631247c83"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.287878 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"053d67d2-ab83-4be2-8de7-0cd894da7a5b","Type":"ContainerDied","Data":"b91929c51be4f94c664e8eeeb930cfd1fcf730e0fcbf1dce7c9bb45ce7f2c937"} Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.287929 4689 scope.go:117] "RemoveContainer" containerID="c6478ad3e2e904d66aa12366c2d5a0e63131e29f36f036f4d6fb19aff8730df9" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.288058 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290603 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290631 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knpn9\" (UniqueName: \"kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290708 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290737 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts\") pod \"d4348edb-5864-4652-9e63-b2c452905118\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290758 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.290783 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.291430 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d4348edb-5864-4652-9e63-b2c452905118" (UID: "d4348edb-5864-4652-9e63-b2c452905118"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.291924 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292022 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292092 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts\") pod \"2a91b673-3b20-4718-91f0-695225bc7f82\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292191 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdltt\" (UniqueName: \"kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt\") pod \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292211 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bs6q\" (UniqueName: \"kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q\") pod \"d4348edb-5864-4652-9e63-b2c452905118\" (UID: \"d4348edb-5864-4652-9e63-b2c452905118\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292335 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts\") pod \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\" (UID: \"9f3f3ef3-0b10-4d91-8147-3ea51947dc78\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292356 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292397 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle\") pod \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\" (UID: \"053d67d2-ab83-4be2-8de7-0cd894da7a5b\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292438 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84trr\" (UniqueName: \"kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr\") pod \"2a91b673-3b20-4718-91f0-695225bc7f82\" (UID: \"2a91b673-3b20-4718-91f0-695225bc7f82\") " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292562 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2a91b673-3b20-4718-91f0-695225bc7f82" (UID: "2a91b673-3b20-4718-91f0-695225bc7f82"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.292966 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9f3f3ef3-0b10-4d91-8147-3ea51947dc78" (UID: "9f3f3ef3-0b10-4d91-8147-3ea51947dc78"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.293607 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.293623 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4348edb-5864-4652-9e63-b2c452905118-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.293632 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.293641 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a91b673-3b20-4718-91f0-695225bc7f82-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.294430 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs" (OuterVolumeSpecName: "logs") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.303307 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr" (OuterVolumeSpecName: "kube-api-access-84trr") pod "2a91b673-3b20-4718-91f0-695225bc7f82" (UID: "2a91b673-3b20-4718-91f0-695225bc7f82"). InnerVolumeSpecName "kube-api-access-84trr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.303346 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt" (OuterVolumeSpecName: "kube-api-access-fdltt") pod "9f3f3ef3-0b10-4d91-8147-3ea51947dc78" (UID: "9f3f3ef3-0b10-4d91-8147-3ea51947dc78"). InnerVolumeSpecName "kube-api-access-fdltt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.303888 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts" (OuterVolumeSpecName: "scripts") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.309399 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9" (OuterVolumeSpecName: "kube-api-access-knpn9") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "kube-api-access-knpn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.309474 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q" (OuterVolumeSpecName: "kube-api-access-8bs6q") pod "d4348edb-5864-4652-9e63-b2c452905118" (UID: "d4348edb-5864-4652-9e63-b2c452905118"). InnerVolumeSpecName "kube-api-access-8bs6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.357259 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20" (OuterVolumeSpecName: "glance") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.374538 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.384305 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data" (OuterVolumeSpecName: "config-data") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395796 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395833 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knpn9\" (UniqueName: \"kubernetes.io/projected/053d67d2-ab83-4be2-8de7-0cd894da7a5b-kube-api-access-knpn9\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395851 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395863 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395892 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") on node \"crc\" " Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395910 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdltt\" (UniqueName: \"kubernetes.io/projected/9f3f3ef3-0b10-4d91-8147-3ea51947dc78-kube-api-access-fdltt\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395923 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bs6q\" (UniqueName: \"kubernetes.io/projected/d4348edb-5864-4652-9e63-b2c452905118-kube-api-access-8bs6q\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395935 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/053d67d2-ab83-4be2-8de7-0cd894da7a5b-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.395946 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84trr\" (UniqueName: \"kubernetes.io/projected/2a91b673-3b20-4718-91f0-695225bc7f82-kube-api-access-84trr\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.419438 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "053d67d2-ab83-4be2-8de7-0cd894da7a5b" (UID: "053d67d2-ab83-4be2-8de7-0cd894da7a5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.441495 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.441661 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20") on node "crc" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.501069 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.504185 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/053d67d2-ab83-4be2-8de7-0cd894da7a5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.627332 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.635968 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.661698 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:44 crc kubenswrapper[4689]: E0123 11:13:44.662191 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-httpd" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662210 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-httpd" Jan 23 11:13:44 crc kubenswrapper[4689]: E0123 11:13:44.662223 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-log" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662230 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-log" Jan 23 11:13:44 crc kubenswrapper[4689]: E0123 11:13:44.662240 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a91b673-3b20-4718-91f0-695225bc7f82" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662246 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a91b673-3b20-4718-91f0-695225bc7f82" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: E0123 11:13:44.662266 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4348edb-5864-4652-9e63-b2c452905118" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662273 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4348edb-5864-4652-9e63-b2c452905118" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: E0123 11:13:44.662310 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f3f3ef3-0b10-4d91-8147-3ea51947dc78" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662316 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f3f3ef3-0b10-4d91-8147-3ea51947dc78" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662517 4689 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-httpd" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662536 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f3f3ef3-0b10-4d91-8147-3ea51947dc78" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662548 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a91b673-3b20-4718-91f0-695225bc7f82" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662557 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4348edb-5864-4652-9e63-b2c452905118" containerName="mariadb-database-create" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.662574 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" containerName="glance-log" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.663741 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.668690 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.669027 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.681120 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.812816 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njv6c\" (UniqueName: \"kubernetes.io/projected/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-kube-api-access-njv6c\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.812868 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.813021 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-scripts\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.813219 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.813298 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-config-data\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.813419 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.813737 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-logs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.815317 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932552 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njv6c\" (UniqueName: \"kubernetes.io/projected/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-kube-api-access-njv6c\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932600 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932674 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-scripts\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932727 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932760 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-config-data\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932778 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.932825 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-logs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.933337 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-logs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.933519 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.935029 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.938487 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-config-data\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.939690 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.940503 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.940532 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0138e8ab527b9277b6df488760b5dddec306b1306b4e1e9b3fbb986ed4f86327/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.942371 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-scripts\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.943930 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.966896 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njv6c\" (UniqueName: \"kubernetes.io/projected/2edb6824-46b3-40f9-8bef-8ec1a068ed8e-kube-api-access-njv6c\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:44 crc kubenswrapper[4689]: I0123 11:13:44.975356 4689 scope.go:117] "RemoveContainer" containerID="081eeeb029b6dbd702c8ac6a4c6e88d6920f2af25d4372c65eeee1cf3b685ef5" Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.082946 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8785d635-1f1a-459f-8eda-7ce44de6dc20\") pod \"glance-default-external-api-0\" (UID: \"2edb6824-46b3-40f9-8bef-8ec1a068ed8e\") " pod="openstack/glance-default-external-api-0" Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.298982 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.315641 4689 generic.go:334] "Generic (PLEG): container finished" podID="3cde63b4-6033-4530-884d-55d2274538c5" containerID="1c42ba5d5820ff524a755e99c8a19d0b72c310815da77fe48ecaffb373f3cc9a" exitCode=0 Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.315703 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f707-account-create-update-w6nbh" event={"ID":"3cde63b4-6033-4530-884d-55d2274538c5","Type":"ContainerDied","Data":"1c42ba5d5820ff524a755e99c8a19d0b72c310815da77fe48ecaffb373f3cc9a"} Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.343514 4689 generic.go:334] "Generic (PLEG): container finished" podID="2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" containerID="a2d518ab19a834f9b0667f069e1a0c90ff48526c26098f724789fccd3c1f9f82" exitCode=0 Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.343815 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" event={"ID":"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2","Type":"ContainerDied","Data":"a2d518ab19a834f9b0667f069e1a0c90ff48526c26098f724789fccd3c1f9f82"} Jan 23 11:13:45 crc kubenswrapper[4689]: I0123 11:13:45.718527 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="053d67d2-ab83-4be2-8de7-0cd894da7a5b" path="/var/lib/kubelet/pods/053d67d2-ab83-4be2-8de7-0cd894da7a5b/volumes" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.308773 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.356716 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerStarted","Data":"f76e43d3ad93467e52bec55d6b086c5334941560d007637444f18911a93e5e1f"} Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.356792 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.367783 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" podUID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" containerName="heat-cfnapi" containerID="cri-o://56722c79a0b7c9ccf3206e72f496e3327caaeeb8d980d5807fdf0bfe06565079" gracePeriod=60 Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.368045 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" event={"ID":"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b","Type":"ContainerStarted","Data":"56722c79a0b7c9ccf3206e72f496e3327caaeeb8d980d5807fdf0bfe06565079"} Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.368081 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.368130 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.377363 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" podStartSLOduration=4.329242994 podStartE2EDuration="9.377344915s" podCreationTimestamp="2026-01-23 11:13:37 +0000 UTC" firstStartedPulling="2026-01-23 11:13:40.091813303 +0000 UTC 
m=+1484.716493172" lastFinishedPulling="2026-01-23 11:13:45.139915234 +0000 UTC m=+1489.764595093" observedRunningTime="2026-01-23 11:13:46.375901159 +0000 UTC m=+1491.000581018" watchObservedRunningTime="2026-01-23 11:13:46.377344915 +0000 UTC m=+1491.002024774" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.379308 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8f8c96b8-rjrg7" event={"ID":"b57b3805-c785-43e0-a8f2-6bab72916aa4","Type":"ContainerStarted","Data":"d3dceed73ed2632c55b101e45e515ae5a53090607fdbb257cc86516383108f3c"} Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.379506 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-7b8f8c96b8-rjrg7" podUID="b57b3805-c785-43e0-a8f2-6bab72916aa4" containerName="heat-api" containerID="cri-o://d3dceed73ed2632c55b101e45e515ae5a53090607fdbb257cc86516383108f3c" gracePeriod=60 Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.379604 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.381434 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8wtbq" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.382694 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" event={"ID":"1b25887e-9a4b-4100-b672-2d46c34cf1e0","Type":"ContainerDied","Data":"d2489c9be2b7e472358d427003bbd9185b9d65005f8a8b18b153955726f6daa8"} Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.382721 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2489c9be2b7e472358d427003bbd9185b9d65005f8a8b18b153955726f6daa8" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.403726 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" podStartSLOduration=10.426716097 podStartE2EDuration="16.403707172s" podCreationTimestamp="2026-01-23 11:13:30 +0000 UTC" firstStartedPulling="2026-01-23 11:13:39.16134346 +0000 UTC m=+1483.786023319" lastFinishedPulling="2026-01-23 11:13:45.138334535 +0000 UTC m=+1489.763014394" observedRunningTime="2026-01-23 11:13:46.394542303 +0000 UTC m=+1491.019222162" watchObservedRunningTime="2026-01-23 11:13:46.403707172 +0000 UTC m=+1491.028387031" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.408571 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" event={"ID":"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb","Type":"ContainerStarted","Data":"d6cee0c9c9a5402c222163856c2e78ceeada861845cebb80204ab765be67b048"} Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.409509 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.411540 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.423822 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7b8f8c96b8-rjrg7" podStartSLOduration=10.33977792 podStartE2EDuration="16.423803422s" podCreationTimestamp="2026-01-23 11:13:30 +0000 UTC" firstStartedPulling="2026-01-23 11:13:39.055703028 +0000 UTC m=+1483.680382887" lastFinishedPulling="2026-01-23 11:13:45.13972853 +0000 UTC m=+1489.764408389" observedRunningTime="2026-01-23 11:13:46.422748266 +0000 UTC m=+1491.047428125" watchObservedRunningTime="2026-01-23 11:13:46.423803422 +0000 UTC m=+1491.048483281" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.511060 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts\") pod \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.511288 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpwzk\" (UniqueName: \"kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk\") pod \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\" (UID: \"1b25887e-9a4b-4100-b672-2d46c34cf1e0\") " Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.512932 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b25887e-9a4b-4100-b672-2d46c34cf1e0" (UID: "1b25887e-9a4b-4100-b672-2d46c34cf1e0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.532498 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk" (OuterVolumeSpecName: "kube-api-access-fpwzk") pod "1b25887e-9a4b-4100-b672-2d46c34cf1e0" (UID: "1b25887e-9a4b-4100-b672-2d46c34cf1e0"). InnerVolumeSpecName "kube-api-access-fpwzk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.563352 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.616941 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpwzk\" (UniqueName: \"kubernetes.io/projected/1b25887e-9a4b-4100-b672-2d46c34cf1e0-kube-api-access-fpwzk\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.617012 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b25887e-9a4b-4100-b672-2d46c34cf1e0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.638069 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" podStartSLOduration=16.63804933 podStartE2EDuration="16.63804933s" podCreationTimestamp="2026-01-23 11:13:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:46.484712129 +0000 UTC m=+1491.109391988" watchObservedRunningTime="2026-01-23 11:13:46.63804933 +0000 UTC m=+1491.262729189" Jan 23 11:13:46 crc kubenswrapper[4689]: I0123 11:13:46.697913 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"] Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.480886 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2edb6824-46b3-40f9-8bef-8ec1a068ed8e","Type":"ContainerStarted","Data":"d6d2360e4468e9f85f41709b176493f28ceae9057f701713d157284ec4b4b89f"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.507835 4689 generic.go:334] "Generic (PLEG): container finished" podID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerID="f76e43d3ad93467e52bec55d6b086c5334941560d007637444f18911a93e5e1f" exitCode=1 Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.507938 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerDied","Data":"f76e43d3ad93467e52bec55d6b086c5334941560d007637444f18911a93e5e1f"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.508741 4689 scope.go:117] "RemoveContainer" containerID="f76e43d3ad93467e52bec55d6b086c5334941560d007637444f18911a93e5e1f" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.541526 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerStarted","Data":"80d382f9b183f1353b5025df871cba6420d725cc0d8981ff50296cad9aba4606"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.559229 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" event={"ID":"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2","Type":"ContainerDied","Data":"f4c1305b3821edd3c03ccce906705f7bd693bfa22f66a2ef226cbb4bb9015f64"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.559488 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4c1305b3821edd3c03ccce906705f7bd693bfa22f66a2ef226cbb4bb9015f64" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.560524 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.561711 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fbcd48894-jz4gg" event={"ID":"cd9d1a8d-fd0e-4155-8085-5584c456cecb","Type":"ContainerStarted","Data":"c7541d58f36f5a6d744f87fb54b40c1db0c29b2e35533d5d8be8ef20959b28ad"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.562701 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.582172 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-99bf8995f-hqj6x" event={"ID":"e81e6348-c4b6-4601-9ad2-1df770e175a6","Type":"ContainerStarted","Data":"2e79fa652c01b6d713b9de90a67d98f9acd994243840da4d3721a06abf08526d"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.583104 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.622751 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5fbcd48894-jz4gg" podStartSLOduration=5.604288153 podStartE2EDuration="7.622729473s" podCreationTimestamp="2026-01-23 11:13:40 +0000 UTC" firstStartedPulling="2026-01-23 11:13:43.898529535 +0000 UTC m=+1488.523209394" lastFinishedPulling="2026-01-23 11:13:45.916970855 +0000 UTC m=+1490.541650714" observedRunningTime="2026-01-23 11:13:47.610640842 +0000 UTC m=+1492.235320701" watchObservedRunningTime="2026-01-23 11:13:47.622729473 +0000 UTC m=+1492.247409342" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.623021 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8wtbq" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server" containerID="cri-o://0d8088a4de1cdda262a8e859efdc1e86d7af2369f57fc39ec6473db28b41ab97" gracePeriod=2 Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.625696 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"21f5e336-9a50-43ce-8816-46552dcc4b43","Type":"ContainerStarted","Data":"e2f0969c73823b7ef8dc4b77aaa8cc07504fd2ec83006f1177ff19a22c9ba9f3"} Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.625808 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-f6ce-account-create-update-6j84v" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.628663 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.657976 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-99bf8995f-hqj6x" podStartSLOduration=5.566938258 podStartE2EDuration="10.65795229s" podCreationTimestamp="2026-01-23 11:13:37 +0000 UTC" firstStartedPulling="2026-01-23 11:13:40.047335604 +0000 UTC m=+1484.672015463" lastFinishedPulling="2026-01-23 11:13:45.138349636 +0000 UTC m=+1489.763029495" observedRunningTime="2026-01-23 11:13:47.62741663 +0000 UTC m=+1492.252096489" watchObservedRunningTime="2026-01-23 11:13:47.65795229 +0000 UTC m=+1492.282632149" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.700925 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=9.700898981 podStartE2EDuration="9.700898981s" podCreationTimestamp="2026-01-23 11:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:47.660788581 +0000 UTC m=+1492.285468440" watchObservedRunningTime="2026-01-23 11:13:47.700898981 +0000 UTC m=+1492.325578850" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.719010 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbk5q\" (UniqueName: \"kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q\") pod \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.719351 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts\") pod \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\" (UID: \"2776dc4d-d084-461d-9d4a-d4cccfcb1dc2\") " Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.724330 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" (UID: "2776dc4d-d084-461d-9d4a-d4cccfcb1dc2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.757802 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q" (OuterVolumeSpecName: "kube-api-access-kbk5q") pod "2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" (UID: "2776dc4d-d084-461d-9d4a-d4cccfcb1dc2"). InnerVolumeSpecName "kube-api-access-kbk5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.774483 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f707-account-create-update-w6nbh"
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.795072 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-67ddbdd968-r44lr"
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.823026 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.823064 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbk5q\" (UniqueName: \"kubernetes.io/projected/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2-kube-api-access-kbk5q\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.924915 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fthlg\" (UniqueName: \"kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg\") pod \"3cde63b4-6033-4530-884d-55d2274538c5\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") "
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.925353 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts\") pod \"3cde63b4-6033-4530-884d-55d2274538c5\" (UID: \"3cde63b4-6033-4530-884d-55d2274538c5\") "
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.926887 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3cde63b4-6033-4530-884d-55d2274538c5" (UID: "3cde63b4-6033-4530-884d-55d2274538c5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:13:47 crc kubenswrapper[4689]: I0123 11:13:47.936865 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg" (OuterVolumeSpecName: "kube-api-access-fthlg") pod "3cde63b4-6033-4530-884d-55d2274538c5" (UID: "3cde63b4-6033-4530-884d-55d2274538c5"). InnerVolumeSpecName "kube-api-access-fthlg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.027832 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3cde63b4-6033-4530-884d-55d2274538c5-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.027863 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fthlg\" (UniqueName: \"kubernetes.io/projected/3cde63b4-6033-4530-884d-55d2274538c5-kube-api-access-fthlg\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.366673 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b54798db9-jfwb5"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.712717 4689 generic.go:334] "Generic (PLEG): container finished" podID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerID="2e79fa652c01b6d713b9de90a67d98f9acd994243840da4d3721a06abf08526d" exitCode=1
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.713368 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-99bf8995f-hqj6x" event={"ID":"e81e6348-c4b6-4601-9ad2-1df770e175a6","Type":"ContainerDied","Data":"2e79fa652c01b6d713b9de90a67d98f9acd994243840da4d3721a06abf08526d"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.714301 4689 scope.go:117] "RemoveContainer" containerID="2e79fa652c01b6d713b9de90a67d98f9acd994243840da4d3721a06abf08526d"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.755417 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f707-account-create-update-w6nbh" event={"ID":"3cde63b4-6033-4530-884d-55d2274538c5","Type":"ContainerDied","Data":"c2b459f9b55bac1b8255af89d3ea8b1d6b81ae6b09e41ca02f10f071eef4936c"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.755456 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2b459f9b55bac1b8255af89d3ea8b1d6b81ae6b09e41ca02f10f071eef4936c"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.755533 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f707-account-create-update-w6nbh"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.800670 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.801269 4689 generic.go:334] "Generic (PLEG): container finished" podID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerID="b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9" exitCode=0
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.801362 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerDied","Data":"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.801400 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3415e627-33c2-4457-9cea-4dbd78f4d2b3","Type":"ContainerDied","Data":"62a7d57a2a99bd99c49e43a2d7f7993695f5068357abbcccbb40f00bc57e3cd0"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.801448 4689 scope.go:117] "RemoveContainer" containerID="b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.823806 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2edb6824-46b3-40f9-8bef-8ec1a068ed8e","Type":"ContainerStarted","Data":"2878f482751c6fcdfadbec5affe22c746e33eb5950918f1ba27a47e7bb1ccf26"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.840687 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" event={"ID":"4b102ee6-4978-45ed-9026-94e1c433d3f6","Type":"ContainerStarted","Data":"3b8b75a4bd6042b7f5dbf9033caaa0336686db6e8c4eb7a3677138a36f8922c5"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.841350 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.852170 4689 generic.go:334] "Generic (PLEG): container finished" podID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerID="0d8088a4de1cdda262a8e859efdc1e86d7af2369f57fc39ec6473db28b41ab97" exitCode=0
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.852244 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerDied","Data":"0d8088a4de1cdda262a8e859efdc1e86d7af2369f57fc39ec6473db28b41ab97"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.852271 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8wtbq" event={"ID":"24b7f85c-d5f6-4f5b-bb12-887a7c435c60","Type":"ContainerDied","Data":"b9e63a772585637227422d874d26993378f985f1b754c7d8578987ab316c7389"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.852282 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9e63a772585637227422d874d26993378f985f1b754c7d8578987ab316c7389"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.868836 4689 scope.go:117] "RemoveContainer" containerID="0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.886313 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8wtbq"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.894927 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerStarted","Data":"2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78"}
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.895218 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c4d1-account-create-update-9rq2z"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.939296 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" podStartSLOduration=7.558217545 podStartE2EDuration="8.939271794s" podCreationTimestamp="2026-01-23 11:13:40 +0000 UTC" firstStartedPulling="2026-01-23 11:13:43.902803392 +0000 UTC m=+1488.527483261" lastFinishedPulling="2026-01-23 11:13:45.283857651 +0000 UTC m=+1489.908537510" observedRunningTime="2026-01-23 11:13:48.870678865 +0000 UTC m=+1493.495358724" watchObservedRunningTime="2026-01-23 11:13:48.939271794 +0000 UTC m=+1493.563951653"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.973387 4689 scope.go:117] "RemoveContainer" containerID="b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"
Jan 23 11:13:48 crc kubenswrapper[4689]: E0123 11:13:48.981031 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9\": container with ID starting with b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9 not found: ID does not exist" containerID="b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.981073 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9"} err="failed to get container status \"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9\": rpc error: code = NotFound desc = could not find container \"b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9\": container with ID starting with b72aa04fdbe8422a2d181e7646010b02a98c5736f6a339b8c85781fb92f12ce9 not found: ID does not exist"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.981098 4689 scope.go:117] "RemoveContainer" containerID="0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"
Jan 23 11:13:48 crc kubenswrapper[4689]: E0123 11:13:48.986020 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635\": container with ID starting with 0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635 not found: ID does not exist" containerID="0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.986057 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635"} err="failed to get container status \"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635\": rpc error: code = NotFound desc = could not find container \"0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635\": container with ID starting with 0d9d06035cd7fbe76d3d9f796c75b93ba7bc237071fe6da6f212fbdbb54bc635 not found: ID does not exist"
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.986786 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75xds\" (UniqueName: \"kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.986828 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.986914 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.986988 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.987136 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities\") pod \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.987172 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.991350 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs" (OuterVolumeSpecName: "logs") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.991986 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities" (OuterVolumeSpecName: "utilities") pod "24b7f85c-d5f6-4f5b-bb12-887a7c435c60" (UID: "24b7f85c-d5f6-4f5b-bb12-887a7c435c60"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.993296 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.993355 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.993400 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-922mk\" (UniqueName: \"kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk\") pod \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.993483 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") pod \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.993533 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run\") pod \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\" (UID: \"3415e627-33c2-4457-9cea-4dbd78f4d2b3\") "
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.994401 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.998545 4689 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.998576 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3415e627-33c2-4457-9cea-4dbd78f4d2b3-logs\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:48 crc kubenswrapper[4689]: I0123 11:13:48.998585 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.000202 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds" (OuterVolumeSpecName: "kube-api-access-75xds") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "kube-api-access-75xds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.003044 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts" (OuterVolumeSpecName: "scripts") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.010764 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk" (OuterVolumeSpecName: "kube-api-access-922mk") pod "24b7f85c-d5f6-4f5b-bb12-887a7c435c60" (UID: "24b7f85c-d5f6-4f5b-bb12-887a7c435c60"). InnerVolumeSpecName "kube-api-access-922mk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.056694 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44" (OuterVolumeSpecName: "glance") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.064881 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.101831 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.101861 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.101884 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") on node \"crc\" "
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.101895 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-922mk\" (UniqueName: \"kubernetes.io/projected/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-kube-api-access-922mk\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.101906 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75xds\" (UniqueName: \"kubernetes.io/projected/3415e627-33c2-4457-9cea-4dbd78f4d2b3-kube-api-access-75xds\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.155140 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.170373 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.170534 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44") on node "crc"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.189333 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data" (OuterVolumeSpecName: "config-data") pod "3415e627-33c2-4457-9cea-4dbd78f4d2b3" (UID: "3415e627-33c2-4457-9cea-4dbd78f4d2b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.203454 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.203481 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.203492 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3415e627-33c2-4457-9cea-4dbd78f4d2b3-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.303598 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24b7f85c-d5f6-4f5b-bb12-887a7c435c60" (UID: "24b7f85c-d5f6-4f5b-bb12-887a7c435c60"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.304827 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") pod \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\" (UID: \"24b7f85c-d5f6-4f5b-bb12-887a7c435c60\") "
Jan 23 11:13:49 crc kubenswrapper[4689]: W0123 11:13:49.304986 4689 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/24b7f85c-d5f6-4f5b-bb12-887a7c435c60/volumes/kubernetes.io~empty-dir/catalog-content
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.305021 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24b7f85c-d5f6-4f5b-bb12-887a7c435c60" (UID: "24b7f85c-d5f6-4f5b-bb12-887a7c435c60"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.306316 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24b7f85c-d5f6-4f5b-bb12-887a7c435c60-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.910289 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.913762 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2edb6824-46b3-40f9-8bef-8ec1a068ed8e","Type":"ContainerStarted","Data":"e608ed3819df9bb0c75aa6a1ab32ae01d9dc4e743c566f7124fb77647d5cf908"}
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.917211 4689 generic.go:334] "Generic (PLEG): container finished" podID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78" exitCode=1
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.917270 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerDied","Data":"2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78"}
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.917318 4689 scope.go:117] "RemoveContainer" containerID="f76e43d3ad93467e52bec55d6b086c5334941560d007637444f18911a93e5e1f"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.918223 4689 scope.go:117] "RemoveContainer" containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78"
Jan 23 11:13:49 crc kubenswrapper[4689]: E0123 11:13:49.918663 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-67ddbdd968-r44lr_openstack(3ec58659-52d0-4a0d-a7b6-f6d1488be93c)\"" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.945745 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerStarted","Data":"626b001facf3802fc30f7fa687c83a0eaec38b53e74dcc5b5c14ec8263834c70"}
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.948386 4689 generic.go:334] "Generic (PLEG): container finished" podID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7" exitCode=1
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.950287 4689 scope.go:117] "RemoveContainer" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7"
Jan 23 11:13:49 crc kubenswrapper[4689]: E0123 11:13:49.950581 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-99bf8995f-hqj6x_openstack(e81e6348-c4b6-4601-9ad2-1df770e175a6)\"" pod="openstack/heat-api-99bf8995f-hqj6x" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.950812 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-99bf8995f-hqj6x" event={"ID":"e81e6348-c4b6-4601-9ad2-1df770e175a6","Type":"ContainerDied","Data":"b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7"}
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.950891 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8wtbq"
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.951575 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.967866 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 11:13:49 crc kubenswrapper[4689]: I0123 11:13:49.980569 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.980548397 podStartE2EDuration="5.980548397s" podCreationTimestamp="2026-01-23 11:13:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:49.961041641 +0000 UTC m=+1494.585721500" watchObservedRunningTime="2026-01-23 11:13:49.980548397 +0000 UTC m=+1494.605228256"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.002937 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003506 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003528 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003549 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003559 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003572 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="extract-content"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003581 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="extract-content"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003599 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cde63b4-6033-4530-884d-55d2274538c5" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003606 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cde63b4-6033-4530-884d-55d2274538c5" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003623 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="extract-utilities"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003631 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="extract-utilities"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003667 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b25887e-9a4b-4100-b672-2d46c34cf1e0" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003678 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b25887e-9a4b-4100-b672-2d46c34cf1e0" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003699 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-log"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003707 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-log"
Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.003717 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-httpd"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.003724 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-httpd"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004005 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004025 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-log"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004040 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" containerName="registry-server"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004050 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cde63b4-6033-4530-884d-55d2274538c5" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004067 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" containerName="glance-httpd"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.004085 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b25887e-9a4b-4100-b672-2d46c34cf1e0" containerName="mariadb-account-create-update"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.005885 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.008449 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.008590 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.013820 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.050070 4689 scope.go:117] "RemoveContainer" containerID="2e79fa652c01b6d713b9de90a67d98f9acd994243840da4d3721a06abf08526d"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.125786 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.125889 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.125971 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.126068 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.126262 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.126947 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.127274 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-654cf\" (UniqueName: \"kubernetes.io/projected/58c805fc-4794-41c6-a425-9d9efbac01a3-kube-api-access-654cf\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.127580 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.150629 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"]
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.189673 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8wtbq"]
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231670 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231750 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-654cf\" (UniqueName: \"kubernetes.io/projected/58c805fc-4794-41c6-a425-9d9efbac01a3-kube-api-access-654cf\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231804 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231848 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231867 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231894 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231939 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.231997 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.233731 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.237386 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58c805fc-4794-41c6-a425-9d9efbac01a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.238195 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.238801 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0"
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.240715 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.240743 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/13eb8beee2c91a18127e0c8709aa1780f04c8bb652adcd05937e8ea03b9a58c8/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.240854 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.250097 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58c805fc-4794-41c6-a425-9d9efbac01a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.283231 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-654cf\" (UniqueName: \"kubernetes.io/projected/58c805fc-4794-41c6-a425-9d9efbac01a3-kube-api-access-654cf\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.542915 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9080a0da-0a62-4e24-a927-49fdcdd49c44\") pod \"glance-default-internal-api-0\" (UID: \"58c805fc-4794-41c6-a425-9d9efbac01a3\") " pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.727652 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.879629 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.952696 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.954510 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="dnsmasq-dns" containerID="cri-o://f470a83fadb9b2103418b52dff07e0f8881f21600363d8299b24a5cc45121b1f" gracePeriod=10 Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.982682 4689 scope.go:117] "RemoveContainer" containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78" Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.982931 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-67ddbdd968-r44lr_openstack(3ec58659-52d0-4a0d-a7b6-f6d1488be93c)\"" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" Jan 23 11:13:50 crc kubenswrapper[4689]: I0123 11:13:50.991142 4689 scope.go:117] "RemoveContainer" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7" Jan 23 11:13:50 crc kubenswrapper[4689]: E0123 11:13:50.991385 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-99bf8995f-hqj6x_openstack(e81e6348-c4b6-4601-9ad2-1df770e175a6)\"" pod="openstack/heat-api-99bf8995f-hqj6x" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" Jan 23 11:13:51 crc kubenswrapper[4689]: I0123 11:13:51.071245 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:13:51 crc kubenswrapper[4689]: W0123 11:13:51.387748 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58c805fc_4794_41c6_a425_9d9efbac01a3.slice/crio-73640684c41dd1a25e002f346b5a15f5e80a5d5346710eccee5c8d0511fc1280 WatchSource:0}: Error finding container 73640684c41dd1a25e002f346b5a15f5e80a5d5346710eccee5c8d0511fc1280: Status 404 returned error can't find the container with id 73640684c41dd1a25e002f346b5a15f5e80a5d5346710eccee5c8d0511fc1280 Jan 23 11:13:51 crc kubenswrapper[4689]: I0123 11:13:51.403248 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 11:13:51 crc kubenswrapper[4689]: I0123 11:13:51.669753 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24b7f85c-d5f6-4f5b-bb12-887a7c435c60" path="/var/lib/kubelet/pods/24b7f85c-d5f6-4f5b-bb12-887a7c435c60/volumes" Jan 23 11:13:51 crc kubenswrapper[4689]: I0123 11:13:51.670958 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3415e627-33c2-4457-9cea-4dbd78f4d2b3" path="/var/lib/kubelet/pods/3415e627-33c2-4457-9cea-4dbd78f4d2b3/volumes" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.039025 4689 generic.go:334] "Generic (PLEG): container finished" podID="43b8b0e2-f632-41a4-846f-d1111c26633a" 
containerID="f470a83fadb9b2103418b52dff07e0f8881f21600363d8299b24a5cc45121b1f" exitCode=0 Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.039215 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" event={"ID":"43b8b0e2-f632-41a4-846f-d1111c26633a","Type":"ContainerDied","Data":"f470a83fadb9b2103418b52dff07e0f8881f21600363d8299b24a5cc45121b1f"} Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.043258 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"58c805fc-4794-41c6-a425-9d9efbac01a3","Type":"ContainerStarted","Data":"73640684c41dd1a25e002f346b5a15f5e80a5d5346710eccee5c8d0511fc1280"} Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.223497 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.273089 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fq49m"] Jan 23 11:13:52 crc kubenswrapper[4689]: E0123 11:13:52.273646 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="dnsmasq-dns" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.273661 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="dnsmasq-dns" Jan 23 11:13:52 crc kubenswrapper[4689]: E0123 11:13:52.273700 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="init" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.273706 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="init" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.273963 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" containerName="dnsmasq-dns" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.275987 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.283760 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fq49m"] Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.284716 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.284737 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-54xtc" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.284945 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304049 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7m5v\" (UniqueName: \"kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304135 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304475 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304517 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304710 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.304825 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.305244 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.305357 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qmqv\" (UniqueName: 
\"kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.305605 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.305813 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.315432 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v" (OuterVolumeSpecName: "kube-api-access-k7m5v") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "kube-api-access-k7m5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.409029 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.409645 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") pod \"43b8b0e2-f632-41a4-846f-d1111c26633a\" (UID: \"43b8b0e2-f632-41a4-846f-d1111c26633a\") " Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.410571 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.410690 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.410738 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qmqv\" (UniqueName: \"kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.410838 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.410912 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7m5v\" (UniqueName: \"kubernetes.io/projected/43b8b0e2-f632-41a4-846f-d1111c26633a-kube-api-access-k7m5v\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: W0123 11:13:52.412079 4689 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/43b8b0e2-f632-41a4-846f-d1111c26633a/volumes/kubernetes.io~configmap/dns-svc Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.412237 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.422451 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config" (OuterVolumeSpecName: "config") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.423400 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.428614 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.428868 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.440877 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qmqv\" (UniqueName: \"kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv\") pod \"nova-cell0-conductor-db-sync-fq49m\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.469405 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.503063 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.508691 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "43b8b0e2-f632-41a4-846f-d1111c26633a" (UID: "43b8b0e2-f632-41a4-846f-d1111c26633a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.528869 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.528906 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.528918 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.528929 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.528939 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43b8b0e2-f632-41a4-846f-d1111c26633a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.621088 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.780997 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.782134 4689 scope.go:117] "RemoveContainer" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7" Jan 23 11:13:52 crc kubenswrapper[4689]: E0123 11:13:52.782487 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-99bf8995f-hqj6x_openstack(e81e6348-c4b6-4601-9ad2-1df770e175a6)\"" pod="openstack/heat-api-99bf8995f-hqj6x" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.783436 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.802373 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.804391 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:13:52 crc kubenswrapper[4689]: I0123 11:13:52.805375 4689 scope.go:117] "RemoveContainer" containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78" Jan 23 11:13:52 crc kubenswrapper[4689]: E0123 11:13:52.805697 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-67ddbdd968-r44lr_openstack(3ec58659-52d0-4a0d-a7b6-f6d1488be93c)\"" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.063108 4689 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" event={"ID":"43b8b0e2-f632-41a4-846f-d1111c26633a","Type":"ContainerDied","Data":"08c3722ed70bd07990327fa8e1262d1317064eda3847b6d1579965f001284eb6"} Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.063183 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-rt2sk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.063188 4689 scope.go:117] "RemoveContainer" containerID="f470a83fadb9b2103418b52dff07e0f8881f21600363d8299b24a5cc45121b1f" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.075353 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerStarted","Data":"fd2989765c56fd99fd8f8b7f0a6c96d99cdbf3a27866924ce9a9e45a2b3ef4f6"} Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.075515 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-central-agent" containerID="cri-o://45fa74d7d96fc685ea6c478266a107679b854da356b20df54da7fea5a4153538" gracePeriod=30 Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.075761 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.075888 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="sg-core" containerID="cri-o://626b001facf3802fc30f7fa687c83a0eaec38b53e74dcc5b5c14ec8263834c70" gracePeriod=30 Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.075902 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-notification-agent" containerID="cri-o://80d382f9b183f1353b5025df871cba6420d725cc0d8981ff50296cad9aba4606" gracePeriod=30 Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.076125 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="proxy-httpd" containerID="cri-o://fd2989765c56fd99fd8f8b7f0a6c96d99cdbf3a27866924ce9a9e45a2b3ef4f6" gracePeriod=30 Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.082735 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"58c805fc-4794-41c6-a425-9d9efbac01a3","Type":"ContainerStarted","Data":"105e1f1043f3cdb943dc64429733308af3ecf24e7157c58dd4b9266bb1f539c1"} Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.083612 4689 scope.go:117] "RemoveContainer" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7" Jan 23 11:13:53 crc kubenswrapper[4689]: E0123 11:13:53.083923 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-99bf8995f-hqj6x_openstack(e81e6348-c4b6-4601-9ad2-1df770e175a6)\"" pod="openstack/heat-api-99bf8995f-hqj6x" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.084276 4689 scope.go:117] "RemoveContainer" 
containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78" Jan 23 11:13:53 crc kubenswrapper[4689]: E0123 11:13:53.084549 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-67ddbdd968-r44lr_openstack(3ec58659-52d0-4a0d-a7b6-f6d1488be93c)\"" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.137444 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.815642091 podStartE2EDuration="16.13741871s" podCreationTimestamp="2026-01-23 11:13:37 +0000 UTC" firstStartedPulling="2026-01-23 11:13:40.172539553 +0000 UTC m=+1484.797219412" lastFinishedPulling="2026-01-23 11:13:51.494316172 +0000 UTC m=+1496.118996031" observedRunningTime="2026-01-23 11:13:53.106415148 +0000 UTC m=+1497.731095007" watchObservedRunningTime="2026-01-23 11:13:53.13741871 +0000 UTC m=+1497.762098569" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.161923 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.183415 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-rt2sk"] Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.195201 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.197932 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.219482 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.389970 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.390119 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttn4m\" (UniqueName: \"kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.390238 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.442348 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.444727 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.497030 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.514530 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttn4m\" (UniqueName: \"kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.515022 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.515528 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.499450 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.585005 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttn4m\" (UniqueName: \"kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m\") pod \"community-operators-fm7wk\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.614472 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.667551 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43b8b0e2-f632-41a4-846f-d1111c26633a" path="/var/lib/kubelet/pods/43b8b0e2-f632-41a4-846f-d1111c26633a/volumes" Jan 23 11:13:53 crc kubenswrapper[4689]: I0123 11:13:53.828863 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:13:54 crc kubenswrapper[4689]: I0123 11:13:54.089594 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:13:54 crc kubenswrapper[4689]: I0123 11:13:54.102700 4689 generic.go:334] "Generic (PLEG): container finished" podID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerID="fd2989765c56fd99fd8f8b7f0a6c96d99cdbf3a27866924ce9a9e45a2b3ef4f6" exitCode=0 Jan 23 11:13:54 crc kubenswrapper[4689]: I0123 11:13:54.102735 4689 generic.go:334] "Generic (PLEG): container finished" podID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerID="626b001facf3802fc30f7fa687c83a0eaec38b53e74dcc5b5c14ec8263834c70" exitCode=2 Jan 23 11:13:54 crc kubenswrapper[4689]: I0123 11:13:54.102795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerDied","Data":"fd2989765c56fd99fd8f8b7f0a6c96d99cdbf3a27866924ce9a9e45a2b3ef4f6"} Jan 23 11:13:54 crc kubenswrapper[4689]: I0123 11:13:54.102838 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerDied","Data":"626b001facf3802fc30f7fa687c83a0eaec38b53e74dcc5b5c14ec8263834c70"} Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.110934 4689 scope.go:117] "RemoveContainer" containerID="afc2a4b2e140d8738e750a36eb8d1bb3222248c9355e1d566b72b8a324ce3a9d" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.205801 4689 generic.go:334] "Generic (PLEG): container finished" podID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerID="80d382f9b183f1353b5025df871cba6420d725cc0d8981ff50296cad9aba4606" exitCode=0 Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.205831 4689 generic.go:334] "Generic (PLEG): container finished" podID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerID="45fa74d7d96fc685ea6c478266a107679b854da356b20df54da7fea5a4153538" exitCode=0 Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.205841 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerDied","Data":"80d382f9b183f1353b5025df871cba6420d725cc0d8981ff50296cad9aba4606"} Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.205883 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerDied","Data":"45fa74d7d96fc685ea6c478266a107679b854da356b20df54da7fea5a4153538"} Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.299713 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.300307 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.451440 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.467377 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.548541 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.582300 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fq49m"] Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.601479 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom\") pod \"e81e6348-c4b6-4601-9ad2-1df770e175a6\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.601812 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data\") pod \"e81e6348-c4b6-4601-9ad2-1df770e175a6\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.601909 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle\") pod \"e81e6348-c4b6-4601-9ad2-1df770e175a6\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.602076 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzckj\" (UniqueName: \"kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj\") pod \"e81e6348-c4b6-4601-9ad2-1df770e175a6\" (UID: \"e81e6348-c4b6-4601-9ad2-1df770e175a6\") " Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.609579 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj" (OuterVolumeSpecName: "kube-api-access-lzckj") pod "e81e6348-c4b6-4601-9ad2-1df770e175a6" (UID: "e81e6348-c4b6-4601-9ad2-1df770e175a6"). InnerVolumeSpecName "kube-api-access-lzckj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.616470 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e81e6348-c4b6-4601-9ad2-1df770e175a6" (UID: "e81e6348-c4b6-4601-9ad2-1df770e175a6"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.713794 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.714239 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzckj\" (UniqueName: \"kubernetes.io/projected/e81e6348-c4b6-4601-9ad2-1df770e175a6-kube-api-access-lzckj\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.751614 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e81e6348-c4b6-4601-9ad2-1df770e175a6" (UID: "e81e6348-c4b6-4601-9ad2-1df770e175a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.755976 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data" (OuterVolumeSpecName: "config-data") pod "e81e6348-c4b6-4601-9ad2-1df770e175a6" (UID: "e81e6348-c4b6-4601-9ad2-1df770e175a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:55 crc kubenswrapper[4689]: E0123 11:13:55.760831 4689 info.go:109] Failed to get network devices: open /sys/class/net/f7430e36ead9927/address: no such file or directory Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.816246 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.816279 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e81e6348-c4b6-4601-9ad2-1df770e175a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:55 crc kubenswrapper[4689]: I0123 11:13:55.871068 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.023114 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.023516 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zdqx\" (UniqueName: \"kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.023651 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.024504 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.024534 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.024560 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.024708 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle\") pod \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\" (UID: \"afe9ed58-5b1a-4e7f-8598-aab9c465adae\") " Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.026015 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.026411 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.027652 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx" (OuterVolumeSpecName: "kube-api-access-9zdqx") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "kube-api-access-9zdqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.029635 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts" (OuterVolumeSpecName: "scripts") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.078896 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.086314 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.131609 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zdqx\" (UniqueName: \"kubernetes.io/projected/afe9ed58-5b1a-4e7f-8598-aab9c465adae-kube-api-access-9zdqx\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.131960 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.131974 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.131986 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afe9ed58-5b1a-4e7f-8598-aab9c465adae-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.131997 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.199836 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.208696 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data" (OuterVolumeSpecName: "config-data") pod "afe9ed58-5b1a-4e7f-8598-aab9c465adae" (UID: "afe9ed58-5b1a-4e7f-8598-aab9c465adae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.229114 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-99bf8995f-hqj6x" event={"ID":"e81e6348-c4b6-4601-9ad2-1df770e175a6","Type":"ContainerDied","Data":"028e67a89836969a70943b33c2747d51d516dc31febeb8e2282d6a1e8d46b54f"} Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.229184 4689 scope.go:117] "RemoveContainer" containerID="b0c1ef6b8840df8983ccfd0a7caf5854ebde3f71b75f3423ebb75ec88a3221a7" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.229272 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-99bf8995f-hqj6x" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.234455 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.234481 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe9ed58-5b1a-4e7f-8598-aab9c465adae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.242475 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerStarted","Data":"6d00911f28614c21d700e483bb763276e89f1e2530b0758edaaab85bf3ce200f"} Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.282140 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afe9ed58-5b1a-4e7f-8598-aab9c465adae","Type":"ContainerDied","Data":"f7430e36ead9927afa8e977b6f5426473c64a103e3ac5502ef3ed0a26a67f27e"} Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.282288 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.300355 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fq49m" event={"ID":"f705ffdf-98ca-48b4-bd00-1a4804326940","Type":"ContainerStarted","Data":"80b8b6d5cc5272818b0a373a0bb0683481922b47c563bd4838f26fd36c22ef07"} Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.308848 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"58c805fc-4794-41c6-a425-9d9efbac01a3","Type":"ContainerStarted","Data":"e6eb835b54ab36c4a47e0f2f68b4411b09205b5b9544238ab0cacd2d8b191ddf"} Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.309863 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.310856 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.359100 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.359075117 podStartE2EDuration="7.359075117s" podCreationTimestamp="2026-01-23 11:13:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:13:56.351986011 +0000 UTC m=+1500.976665870" watchObservedRunningTime="2026-01-23 11:13:56.359075117 +0000 UTC m=+1500.983754976" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.397344 4689 scope.go:117] "RemoveContainer" containerID="fd2989765c56fd99fd8f8b7f0a6c96d99cdbf3a27866924ce9a9e45a2b3ef4f6" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.441831 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.458297 4689 scope.go:117] "RemoveContainer" containerID="626b001facf3802fc30f7fa687c83a0eaec38b53e74dcc5b5c14ec8263834c70" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.468419 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.489140 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.535877 4689 scope.go:117] "RemoveContainer" containerID="80d382f9b183f1353b5025df871cba6420d725cc0d8981ff50296cad9aba4606" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.550256 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-99bf8995f-hqj6x"] Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.565957 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567112 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-central-agent" Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567138 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-central-agent" Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567187 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="proxy-httpd" Jan 23 11:13:56 crc 
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567195 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="proxy-httpd"
Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567213 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-notification-agent"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567267 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-notification-agent"
Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567303 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="sg-core"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567311 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="sg-core"
Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567326 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567333 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: E0123 11:13:56.567355 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567361 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567882 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567929 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="proxy-httpd"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567953 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="sg-core"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567967 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-notification-agent"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.567978 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" containerName="heat-api"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.568016 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" containerName="ceilometer-central-agent"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.572517 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.582662 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.582666 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.645200 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646628 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxjpn\" (UniqueName: \"kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646684 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646722 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646750 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646862 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646905 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.646929 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.749983 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxjpn\" (UniqueName: \"kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750069 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750101 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750139 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750223 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750251 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.750274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.752987 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.754287 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.759000 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.759902 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.760975 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.762107 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.776792 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxjpn\" (UniqueName: \"kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn\") pod \"ceilometer-0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.930422 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 11:13:56 crc kubenswrapper[4689]: I0123 11:13:56.977863 4689 scope.go:117] "RemoveContainer" containerID="45fa74d7d96fc685ea6c478266a107679b854da356b20df54da7fea5a4153538"
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.363080 4689 generic.go:334] "Generic (PLEG): container finished" podID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerID="b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce" exitCode=0
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.365639 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerDied","Data":"b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce"}
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.530600 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.560914 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.692175 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afe9ed58-5b1a-4e7f-8598-aab9c465adae" path="/var/lib/kubelet/pods/afe9ed58-5b1a-4e7f-8598-aab9c465adae/volumes"
Jan 23 11:13:57 crc kubenswrapper[4689]: I0123 11:13:57.693233 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e81e6348-c4b6-4601-9ad2-1df770e175a6" path="/var/lib/kubelet/pods/e81e6348-c4b6-4601-9ad2-1df770e175a6/volumes"
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.155971 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6b6f6df6f9-dvwpc"
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.267733 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"]
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.270701 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-757d5d5668-vt8hl" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" containerID="cri-o://8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" gracePeriod=60
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.419619 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.419646 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 11:13:58 crc kubenswrapper[4689]: I0123 11:13:58.420303 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerStarted","Data":"9d677277b5beb51b13a7474bb55fe3b3cffb1bcef02405de35942056915e598e"}
Jan 23 11:13:59 crc kubenswrapper[4689]: I0123 11:13:59.443759 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerStarted","Data":"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2"}
Jan 23 11:13:59 crc kubenswrapper[4689]: I0123 11:13:59.548589 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl"
Jan 23 11:13:59 crc kubenswrapper[4689]: I0123 11:13:59.599331 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"]
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.231424 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-67ddbdd968-r44lr"
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.292579 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom\") pod \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") "
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.292700 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle\") pod \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") "
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.292758 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9z7f\" (UniqueName: \"kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f\") pod \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") "
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.292924 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data\") pod \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\" (UID: \"3ec58659-52d0-4a0d-a7b6-f6d1488be93c\") "
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.311198 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f" (OuterVolumeSpecName: "kube-api-access-t9z7f") pod "3ec58659-52d0-4a0d-a7b6-f6d1488be93c" (UID: "3ec58659-52d0-4a0d-a7b6-f6d1488be93c"). InnerVolumeSpecName "kube-api-access-t9z7f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.324460 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3ec58659-52d0-4a0d-a7b6-f6d1488be93c" (UID: "3ec58659-52d0-4a0d-a7b6-f6d1488be93c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.379099 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ec58659-52d0-4a0d-a7b6-f6d1488be93c" (UID: "3ec58659-52d0-4a0d-a7b6-f6d1488be93c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.404879 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.404916 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.404927 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9z7f\" (UniqueName: \"kubernetes.io/projected/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-kube-api-access-t9z7f\") on node \"crc\" DevicePath \"\""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.450225 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data" (OuterVolumeSpecName: "config-data") pod "3ec58659-52d0-4a0d-a7b6-f6d1488be93c" (UID: "3ec58659-52d0-4a0d-a7b6-f6d1488be93c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.464004 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerStarted","Data":"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e"}
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.470630 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" event={"ID":"3ec58659-52d0-4a0d-a7b6-f6d1488be93c","Type":"ContainerDied","Data":"5576436c35e1ce064eac63b39a286ec0e7504ddb731908d102b29c4f3243dd5e"}
Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.470677 4689 scope.go:117] "RemoveContainer" containerID="2b1ef207054833cedfe51c92ef5e3dd3f5846ab7397a2000cb85525f59a0be78"
Need to start a new one" pod="openstack/heat-cfnapi-67ddbdd968-r44lr" Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.486909 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerStarted","Data":"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f"} Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.508564 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ec58659-52d0-4a0d-a7b6-f6d1488be93c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.534308 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"] Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.549886 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-67ddbdd968-r44lr"] Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.728747 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.730407 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.827458 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:00 crc kubenswrapper[4689]: I0123 11:14:00.906993 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:01 crc kubenswrapper[4689]: E0123 11:14:01.009482 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:01 crc kubenswrapper[4689]: E0123 11:14:01.013784 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:01 crc kubenswrapper[4689]: E0123 11:14:01.021210 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:01 crc kubenswrapper[4689]: E0123 11:14:01.021566 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-757d5d5668-vt8hl" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" Jan 23 11:14:01 crc kubenswrapper[4689]: I0123 11:14:01.517726 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerStarted","Data":"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3"} Jan 23 11:14:01 
crc kubenswrapper[4689]: I0123 11:14:01.519557 4689 generic.go:334] "Generic (PLEG): container finished" podID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerID="3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e" exitCode=0 Jan 23 11:14:01 crc kubenswrapper[4689]: I0123 11:14:01.519611 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerDied","Data":"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e"} Jan 23 11:14:01 crc kubenswrapper[4689]: I0123 11:14:01.535742 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:01 crc kubenswrapper[4689]: I0123 11:14:01.535770 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:01 crc kubenswrapper[4689]: I0123 11:14:01.684876 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" path="/var/lib/kubelet/pods/3ec58659-52d0-4a0d-a7b6-f6d1488be93c/volumes" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.311020 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.311624 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.311696 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.313062 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.313169 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" gracePeriod=600 Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.604882 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerStarted","Data":"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb"} Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.631612 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" exitCode=0 Jan 23 11:14:03 crc 
kubenswrapper[4689]: I0123 11:14:03.631735 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.631744 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.632533 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"} Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.632588 4689 scope.go:117] "RemoveContainer" containerID="eae776292d106a5845830ce3ec53dd7e23f7ffa3aa758190f8018f2db4651041" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.633803 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fm7wk" podStartSLOduration=5.755324039 podStartE2EDuration="10.633792735s" podCreationTimestamp="2026-01-23 11:13:53 +0000 UTC" firstStartedPulling="2026-01-23 11:13:57.367522912 +0000 UTC m=+1501.992202761" lastFinishedPulling="2026-01-23 11:14:02.245991598 +0000 UTC m=+1506.870671457" observedRunningTime="2026-01-23 11:14:03.630554044 +0000 UTC m=+1508.255233903" watchObservedRunningTime="2026-01-23 11:14:03.633792735 +0000 UTC m=+1508.258472594" Jan 23 11:14:03 crc kubenswrapper[4689]: E0123 11:14:03.800686 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.831412 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.831762 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:03 crc kubenswrapper[4689]: I0123 11:14:03.884417 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="21f5e336-9a50-43ce-8816-46552dcc4b43" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.227:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.407066 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.407212 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.540348 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.706747 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:14:04 crc kubenswrapper[4689]: E0123 11:14:04.711092 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.711764 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerStarted","Data":"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635"} Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.769754 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.360457586 podStartE2EDuration="8.769737807s" podCreationTimestamp="2026-01-23 11:13:56 +0000 UTC" firstStartedPulling="2026-01-23 11:13:57.595795649 +0000 UTC m=+1502.220475508" lastFinishedPulling="2026-01-23 11:14:03.00507587 +0000 UTC m=+1507.629755729" observedRunningTime="2026-01-23 11:14:04.754707012 +0000 UTC m=+1509.379386871" watchObservedRunningTime="2026-01-23 11:14:04.769737807 +0000 UTC m=+1509.394417666" Jan 23 11:14:04 crc kubenswrapper[4689]: I0123 11:14:04.895429 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-fm7wk" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="registry-server" probeResult="failure" output=< Jan 23 11:14:04 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:14:04 crc kubenswrapper[4689]: > Jan 23 11:14:05 crc kubenswrapper[4689]: I0123 11:14:05.732424 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:14:06 crc kubenswrapper[4689]: I0123 11:14:06.507509 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:06 crc kubenswrapper[4689]: I0123 11:14:06.507658 4689 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 11:14:07 crc kubenswrapper[4689]: I0123 11:14:07.035543 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 23 11:14:09 crc kubenswrapper[4689]: I0123 11:14:09.809946 4689 generic.go:334] "Generic (PLEG): container finished" podID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" exitCode=0 Jan 23 11:14:09 crc kubenswrapper[4689]: I0123 11:14:09.810104 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-757d5d5668-vt8hl" event={"ID":"49fd7217-901d-4fa1-b3f2-2a883295cf83","Type":"ContainerDied","Data":"8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209"} Jan 23 11:14:11 crc kubenswrapper[4689]: E0123 11:14:11.005833 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209 is running failed: container process not found" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:11 crc kubenswrapper[4689]: E0123 11:14:11.007202 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209 is running failed: container process not found" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:11 crc kubenswrapper[4689]: E0123 11:14:11.008011 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209 is running failed: container process not found" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:14:11 crc kubenswrapper[4689]: E0123 11:14:11.008063 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209 is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-757d5d5668-vt8hl" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" Jan 23 11:14:13 crc kubenswrapper[4689]: I0123 11:14:13.983520 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:14 crc kubenswrapper[4689]: I0123 11:14:14.041074 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:14 crc kubenswrapper[4689]: I0123 11:14:14.226988 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.222076 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.275955 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom\") pod \"49fd7217-901d-4fa1-b3f2-2a883295cf83\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.276223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgwcc\" (UniqueName: \"kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc\") pod \"49fd7217-901d-4fa1-b3f2-2a883295cf83\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.276336 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle\") pod \"49fd7217-901d-4fa1-b3f2-2a883295cf83\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.276431 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data\") pod \"49fd7217-901d-4fa1-b3f2-2a883295cf83\" (UID: \"49fd7217-901d-4fa1-b3f2-2a883295cf83\") " Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.282668 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "49fd7217-901d-4fa1-b3f2-2a883295cf83" (UID: "49fd7217-901d-4fa1-b3f2-2a883295cf83"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.287258 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc" (OuterVolumeSpecName: "kube-api-access-sgwcc") pod "49fd7217-901d-4fa1-b3f2-2a883295cf83" (UID: "49fd7217-901d-4fa1-b3f2-2a883295cf83"). InnerVolumeSpecName "kube-api-access-sgwcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.380220 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.380252 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgwcc\" (UniqueName: \"kubernetes.io/projected/49fd7217-901d-4fa1-b3f2-2a883295cf83-kube-api-access-sgwcc\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.408753 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49fd7217-901d-4fa1-b3f2-2a883295cf83" (UID: "49fd7217-901d-4fa1-b3f2-2a883295cf83"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.417457 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data" (OuterVolumeSpecName: "config-data") pod "49fd7217-901d-4fa1-b3f2-2a883295cf83" (UID: "49fd7217-901d-4fa1-b3f2-2a883295cf83"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.483689 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.483737 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49fd7217-901d-4fa1-b3f2-2a883295cf83-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.891979 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fq49m" event={"ID":"f705ffdf-98ca-48b4-bd00-1a4804326940","Type":"ContainerStarted","Data":"73ca42c6e19fb5542e75c8c2d4ca6a2e2cb7ec372f27fe3a7f40d1dd4f49213b"} Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.895284 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-757d5d5668-vt8hl" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.895310 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-757d5d5668-vt8hl" event={"ID":"49fd7217-901d-4fa1-b3f2-2a883295cf83","Type":"ContainerDied","Data":"00404ea4f88fa1626b109e4b15b31cc60fe12875bf0bfcbdcae405535a8d5dfb"} Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.895453 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fm7wk" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="registry-server" containerID="cri-o://7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb" gracePeriod=2 Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.895473 4689 scope.go:117] "RemoveContainer" containerID="8037ef554c4588131ce7d589257239a47f98f3a4aeeb397f80fbb0d5b4fd9209" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.912359 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-fq49m" podStartSLOduration=4.444548277 podStartE2EDuration="23.912334392s" podCreationTimestamp="2026-01-23 11:13:52 +0000 UTC" firstStartedPulling="2026-01-23 11:13:55.582673453 +0000 UTC m=+1500.207353312" lastFinishedPulling="2026-01-23 11:14:15.050459568 +0000 UTC m=+1519.675139427" observedRunningTime="2026-01-23 11:14:15.909395428 +0000 UTC m=+1520.534075357" watchObservedRunningTime="2026-01-23 11:14:15.912334392 +0000 UTC m=+1520.537014261" Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.935112 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"] Jan 23 11:14:15 crc kubenswrapper[4689]: I0123 11:14:15.945534 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-757d5d5668-vt8hl"] Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.514280 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.623929 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttn4m\" (UniqueName: \"kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m\") pod \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.624315 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content\") pod \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.624534 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities\") pod \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\" (UID: \"a7b5e683-c7e9-46c4-8110-9e01c64d21a8\") " Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.625402 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities" (OuterVolumeSpecName: "utilities") pod "a7b5e683-c7e9-46c4-8110-9e01c64d21a8" (UID: "a7b5e683-c7e9-46c4-8110-9e01c64d21a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.633470 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m" (OuterVolumeSpecName: "kube-api-access-ttn4m") pod "a7b5e683-c7e9-46c4-8110-9e01c64d21a8" (UID: "a7b5e683-c7e9-46c4-8110-9e01c64d21a8"). InnerVolumeSpecName "kube-api-access-ttn4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.694751 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7b5e683-c7e9-46c4-8110-9e01c64d21a8" (UID: "a7b5e683-c7e9-46c4-8110-9e01c64d21a8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.729187 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.729226 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttn4m\" (UniqueName: \"kubernetes.io/projected/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-kube-api-access-ttn4m\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.729240 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5e683-c7e9-46c4-8110-9e01c64d21a8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.908498 4689 generic.go:334] "Generic (PLEG): container finished" podID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerID="7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb" exitCode=0 Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.909676 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fm7wk" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.912256 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerDied","Data":"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb"} Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.912288 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fm7wk" event={"ID":"a7b5e683-c7e9-46c4-8110-9e01c64d21a8","Type":"ContainerDied","Data":"6d00911f28614c21d700e483bb763276e89f1e2530b0758edaaab85bf3ce200f"} Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.912306 4689 scope.go:117] "RemoveContainer" containerID="7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.936456 4689 scope.go:117] "RemoveContainer" containerID="3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.953193 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.962981 4689 scope.go:117] "RemoveContainer" containerID="b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce" Jan 23 11:14:16 crc kubenswrapper[4689]: I0123 11:14:16.963259 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fm7wk"] Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.024960 4689 scope.go:117] "RemoveContainer" containerID="7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb" Jan 23 11:14:17 crc kubenswrapper[4689]: E0123 11:14:17.025559 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb\": container with ID starting with 7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb not found: ID does not exist" containerID="7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.025607 
4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb"} err="failed to get container status \"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb\": rpc error: code = NotFound desc = could not find container \"7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb\": container with ID starting with 7702a47a078d501ccb88dafd7c8f39d7937694e5823734da9afc4b882c1a6bbb not found: ID does not exist" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.025635 4689 scope.go:117] "RemoveContainer" containerID="3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e" Jan 23 11:14:17 crc kubenswrapper[4689]: E0123 11:14:17.025921 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e\": container with ID starting with 3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e not found: ID does not exist" containerID="3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.025952 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e"} err="failed to get container status \"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e\": rpc error: code = NotFound desc = could not find container \"3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e\": container with ID starting with 3332c61fae6e89b8b75f18fe1c7eae22d640053ac7a356454ae5072c5274068e not found: ID does not exist" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.025968 4689 scope.go:117] "RemoveContainer" containerID="b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce" Jan 23 11:14:17 crc kubenswrapper[4689]: E0123 11:14:17.026216 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce\": container with ID starting with b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce not found: ID does not exist" containerID="b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.026239 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce"} err="failed to get container status \"b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce\": rpc error: code = NotFound desc = could not find container \"b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce\": container with ID starting with b1cffd3ce6fc7dade83bb991783f3d46d6c13a9fd67661b44f492b63d0029bce not found: ID does not exist" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.650743 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" path="/var/lib/kubelet/pods/49fd7217-901d-4fa1-b3f2-2a883295cf83/volumes" Jan 23 11:14:17 crc kubenswrapper[4689]: I0123 11:14:17.651453 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" path="/var/lib/kubelet/pods/a7b5e683-c7e9-46c4-8110-9e01c64d21a8/volumes" Jan 23 11:14:18 crc kubenswrapper[4689]: I0123 
11:14:18.640858 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:14:18 crc kubenswrapper[4689]: E0123 11:14:18.641488 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.695783 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.696332 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-central-agent" containerID="cri-o://720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2" gracePeriod=30 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.696438 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-notification-agent" containerID="cri-o://1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f" gracePeriod=30 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.696423 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="sg-core" containerID="cri-o://8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3" gracePeriod=30 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.696598 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="proxy-httpd" containerID="cri-o://f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635" gracePeriod=30 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.704327 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.234:3000/\": EOF" Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.979545 4689 generic.go:334] "Generic (PLEG): container finished" podID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerID="f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635" exitCode=0 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.979904 4689 generic.go:334] "Generic (PLEG): container finished" podID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerID="8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3" exitCode=2 Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.979935 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerDied","Data":"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635"} Jan 23 11:14:20 crc kubenswrapper[4689]: I0123 11:14:20.979967 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerDied","Data":"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3"} Jan 23 11:14:21 crc kubenswrapper[4689]: I0123 11:14:21.995473 4689 generic.go:334] "Generic (PLEG): container finished" podID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerID="720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2" exitCode=0 Jan 23 11:14:21 crc kubenswrapper[4689]: I0123 11:14:21.995501 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerDied","Data":"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2"} Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.562304 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.601755 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.601936 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxjpn\" (UniqueName: \"kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.601994 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.602091 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.602190 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.602240 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.602313 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml\") pod \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\" (UID: \"8f248b30-b2e6-41ca-b2f5-717071bb0ef0\") " Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.623509 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd" 
(OuterVolumeSpecName: "log-httpd") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.623757 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.625976 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts" (OuterVolumeSpecName: "scripts") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.636419 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn" (OuterVolumeSpecName: "kube-api-access-lxjpn") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "kube-api-access-lxjpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.676388 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.705727 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxjpn\" (UniqueName: \"kubernetes.io/projected/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-kube-api-access-lxjpn\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.705766 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.705779 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.705793 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.705807 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.729238 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.791332 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data" (OuterVolumeSpecName: "config-data") pod "8f248b30-b2e6-41ca-b2f5-717071bb0ef0" (UID: "8f248b30-b2e6-41ca-b2f5-717071bb0ef0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.808189 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:23 crc kubenswrapper[4689]: I0123 11:14:23.808230 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f248b30-b2e6-41ca-b2f5-717071bb0ef0-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.020987 4689 generic.go:334] "Generic (PLEG): container finished" podID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerID="1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f" exitCode=0 Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.021030 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerDied","Data":"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f"} Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.021057 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f248b30-b2e6-41ca-b2f5-717071bb0ef0","Type":"ContainerDied","Data":"9d677277b5beb51b13a7474bb55fe3b3cffb1bcef02405de35942056915e598e"} Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.021070 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.021091 4689 scope.go:117] "RemoveContainer" containerID="f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.055109 4689 scope.go:117] "RemoveContainer" containerID="8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.062691 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.074195 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.110430 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111129 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111163 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111192 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="sg-core" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111198 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="sg-core" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111213 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-notification-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111219 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" 
containerName="ceilometer-notification-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111237 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="proxy-httpd" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111243 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="proxy-httpd" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111258 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="extract-utilities" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111263 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="extract-utilities" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111274 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="extract-content" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111280 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="extract-content" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111289 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="registry-server" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111297 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="registry-server" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111309 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111315 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111338 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-central-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111345 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-central-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111550 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="sg-core" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111562 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111576 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111584 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7b5e683-c7e9-46c4-8110-9e01c64d21a8" containerName="registry-server" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111593 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-central-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111602 4689 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="49fd7217-901d-4fa1-b3f2-2a883295cf83" containerName="heat-engine" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111612 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="proxy-httpd" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111619 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" containerName="ceilometer-notification-agent" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.111804 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.111811 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ec58659-52d0-4a0d-a7b6-f6d1488be93c" containerName="heat-cfnapi" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.114311 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.118718 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.118987 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.120664 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.144848 4689 scope.go:117] "RemoveContainer" containerID="1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.170561 4689 scope.go:117] "RemoveContainer" containerID="720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.216984 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217035 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217168 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ptvx\" (UniqueName: \"kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217187 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217244 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.217515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.254856 4689 scope.go:117] "RemoveContainer" containerID="f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.255551 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635\": container with ID starting with f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635 not found: ID does not exist" containerID="f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.255584 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635"} err="failed to get container status \"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635\": rpc error: code = NotFound desc = could not find container \"f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635\": container with ID starting with f07c743432ccc711c42fc2b7444cd72591fee4de4226191dbecb5cf2499da635 not found: ID does not exist" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.255611 4689 scope.go:117] "RemoveContainer" containerID="8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.256051 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3\": container with ID starting with 8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3 not found: ID does not exist" containerID="8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.256080 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3"} err="failed to get container status \"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3\": rpc error: code = NotFound desc = could not find container \"8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3\": container with ID starting with 8fbdd62192b825fe45aa3b0efada5edf71303fe161b7fb7a920470eb81d716c3 not found: ID does not exist" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.256094 4689 scope.go:117] "RemoveContainer" 
containerID="1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.256462 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f\": container with ID starting with 1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f not found: ID does not exist" containerID="1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.256485 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f"} err="failed to get container status \"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f\": rpc error: code = NotFound desc = could not find container \"1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f\": container with ID starting with 1489edacea8583060b7d710946dc6d5c91815bb825f87ea2ee869fe65e17d76f not found: ID does not exist" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.256501 4689 scope.go:117] "RemoveContainer" containerID="720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2" Jan 23 11:14:24 crc kubenswrapper[4689]: E0123 11:14:24.256747 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2\": container with ID starting with 720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2 not found: ID does not exist" containerID="720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.256769 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2"} err="failed to get container status \"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2\": rpc error: code = NotFound desc = could not find container \"720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2\": container with ID starting with 720df42ac1f147fe5b4a86711bfc39d478f81202d53a7d4f37d6c3189110cdc2 not found: ID does not exist" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319418 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319521 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319546 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319599 4689 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319665 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ptvx\" (UniqueName: \"kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319690 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.319748 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.320988 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.321545 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.325711 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.325963 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.328177 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.332549 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.336518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ptvx\" (UniqueName: 
\"kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx\") pod \"ceilometer-0\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.430849 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:24 crc kubenswrapper[4689]: I0123 11:14:24.995068 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:25 crc kubenswrapper[4689]: I0123 11:14:25.033774 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerStarted","Data":"06482ea4c3dd7567a9631756966501c6b81eab1be830aaa34f31503255258f45"} Jan 23 11:14:25 crc kubenswrapper[4689]: I0123 11:14:25.656189 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f248b30-b2e6-41ca-b2f5-717071bb0ef0" path="/var/lib/kubelet/pods/8f248b30-b2e6-41ca-b2f5-717071bb0ef0/volumes" Jan 23 11:14:26 crc kubenswrapper[4689]: I0123 11:14:26.046080 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerStarted","Data":"2711514aa6908c824c79ac2d5c1adee31b78e730d11d918fbb6a18fa25dcdbd8"} Jan 23 11:14:26 crc kubenswrapper[4689]: I0123 11:14:26.197697 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:27 crc kubenswrapper[4689]: I0123 11:14:27.061484 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerStarted","Data":"7ba6d9544c60f64bd65b2fef0a86b7c3da9da12efdf1572936e737f3c53eecff"} Jan 23 11:14:28 crc kubenswrapper[4689]: I0123 11:14:28.074521 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerStarted","Data":"c825dd92eb488347487976865a8d80b8cc184d7f6a0632af4416f95c086a85ca"} Jan 23 11:14:28 crc kubenswrapper[4689]: I0123 11:14:28.076344 4689 generic.go:334] "Generic (PLEG): container finished" podID="f705ffdf-98ca-48b4-bd00-1a4804326940" containerID="73ca42c6e19fb5542e75c8c2d4ca6a2e2cb7ec372f27fe3a7f40d1dd4f49213b" exitCode=0 Jan 23 11:14:28 crc kubenswrapper[4689]: I0123 11:14:28.076393 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fq49m" event={"ID":"f705ffdf-98ca-48b4-bd00-1a4804326940","Type":"ContainerDied","Data":"73ca42c6e19fb5542e75c8c2d4ca6a2e2cb7ec372f27fe3a7f40d1dd4f49213b"} Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090385 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerStarted","Data":"0828aeeefc076e6977b80931868e49a510283ccf57f4a605aa943546ef3e4fe3"} Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090509 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-central-agent" containerID="cri-o://2711514aa6908c824c79ac2d5c1adee31b78e730d11d918fbb6a18fa25dcdbd8" gracePeriod=30 Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090568 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" 
containerName="proxy-httpd" containerID="cri-o://0828aeeefc076e6977b80931868e49a510283ccf57f4a605aa943546ef3e4fe3" gracePeriod=30 Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090623 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-notification-agent" containerID="cri-o://7ba6d9544c60f64bd65b2fef0a86b7c3da9da12efdf1572936e737f3c53eecff" gracePeriod=30 Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090924 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.090645 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="sg-core" containerID="cri-o://c825dd92eb488347487976865a8d80b8cc184d7f6a0632af4416f95c086a85ca" gracePeriod=30 Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.120995 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.624831725 podStartE2EDuration="5.120972971s" podCreationTimestamp="2026-01-23 11:14:24 +0000 UTC" firstStartedPulling="2026-01-23 11:14:25.001141506 +0000 UTC m=+1529.625821385" lastFinishedPulling="2026-01-23 11:14:28.497282772 +0000 UTC m=+1533.121962631" observedRunningTime="2026-01-23 11:14:29.116827078 +0000 UTC m=+1533.741506927" watchObservedRunningTime="2026-01-23 11:14:29.120972971 +0000 UTC m=+1533.745652830" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.614362 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.648794 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts\") pod \"f705ffdf-98ca-48b4-bd00-1a4804326940\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.648997 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle\") pod \"f705ffdf-98ca-48b4-bd00-1a4804326940\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.649265 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data\") pod \"f705ffdf-98ca-48b4-bd00-1a4804326940\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.649320 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qmqv\" (UniqueName: \"kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv\") pod \"f705ffdf-98ca-48b4-bd00-1a4804326940\" (UID: \"f705ffdf-98ca-48b4-bd00-1a4804326940\") " Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.659440 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts" (OuterVolumeSpecName: "scripts") pod "f705ffdf-98ca-48b4-bd00-1a4804326940" (UID: "f705ffdf-98ca-48b4-bd00-1a4804326940"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.663742 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv" (OuterVolumeSpecName: "kube-api-access-5qmqv") pod "f705ffdf-98ca-48b4-bd00-1a4804326940" (UID: "f705ffdf-98ca-48b4-bd00-1a4804326940"). InnerVolumeSpecName "kube-api-access-5qmqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.694557 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data" (OuterVolumeSpecName: "config-data") pod "f705ffdf-98ca-48b4-bd00-1a4804326940" (UID: "f705ffdf-98ca-48b4-bd00-1a4804326940"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.705671 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f705ffdf-98ca-48b4-bd00-1a4804326940" (UID: "f705ffdf-98ca-48b4-bd00-1a4804326940"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.753621 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.753655 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qmqv\" (UniqueName: \"kubernetes.io/projected/f705ffdf-98ca-48b4-bd00-1a4804326940-kube-api-access-5qmqv\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.753666 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:29 crc kubenswrapper[4689]: I0123 11:14:29.753675 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f705ffdf-98ca-48b4-bd00-1a4804326940-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.102964 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fq49m" event={"ID":"f705ffdf-98ca-48b4-bd00-1a4804326940","Type":"ContainerDied","Data":"80b8b6d5cc5272818b0a373a0bb0683481922b47c563bd4838f26fd36c22ef07"} Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.103006 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80b8b6d5cc5272818b0a373a0bb0683481922b47c563bd4838f26fd36c22ef07" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.104047 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fq49m" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106266 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerID="0828aeeefc076e6977b80931868e49a510283ccf57f4a605aa943546ef3e4fe3" exitCode=0 Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106297 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerID="c825dd92eb488347487976865a8d80b8cc184d7f6a0632af4416f95c086a85ca" exitCode=2 Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106304 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerID="7ba6d9544c60f64bd65b2fef0a86b7c3da9da12efdf1572936e737f3c53eecff" exitCode=0 Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106337 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerDied","Data":"0828aeeefc076e6977b80931868e49a510283ccf57f4a605aa943546ef3e4fe3"} Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106398 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerDied","Data":"c825dd92eb488347487976865a8d80b8cc184d7f6a0632af4416f95c086a85ca"} Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.106412 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerDied","Data":"7ba6d9544c60f64bd65b2fef0a86b7c3da9da12efdf1572936e737f3c53eecff"} Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.214534 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 11:14:30 crc kubenswrapper[4689]: E0123 11:14:30.215490 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f705ffdf-98ca-48b4-bd00-1a4804326940" containerName="nova-cell0-conductor-db-sync" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.215514 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f705ffdf-98ca-48b4-bd00-1a4804326940" containerName="nova-cell0-conductor-db-sync" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.215764 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f705ffdf-98ca-48b4-bd00-1a4804326940" containerName="nova-cell0-conductor-db-sync" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.216687 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.219218 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.219349 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-54xtc" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.243355 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.373666 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwd78\" (UniqueName: \"kubernetes.io/projected/343d44be-4dcf-4047-beef-a1603131b74b-kube-api-access-vwd78\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.373739 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.374215 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.476331 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwd78\" (UniqueName: \"kubernetes.io/projected/343d44be-4dcf-4047-beef-a1603131b74b-kube-api-access-vwd78\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.476392 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.476498 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.482105 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.482698 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/343d44be-4dcf-4047-beef-a1603131b74b-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.500890 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwd78\" (UniqueName: \"kubernetes.io/projected/343d44be-4dcf-4047-beef-a1603131b74b-kube-api-access-vwd78\") pod \"nova-cell0-conductor-0\" (UID: \"343d44be-4dcf-4047-beef-a1603131b74b\") " pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.533157 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:30 crc kubenswrapper[4689]: I0123 11:14:30.640483 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:14:30 crc kubenswrapper[4689]: E0123 11:14:30.640866 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:14:31 crc kubenswrapper[4689]: I0123 11:14:31.063055 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 11:14:31 crc kubenswrapper[4689]: I0123 11:14:31.116191 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"343d44be-4dcf-4047-beef-a1603131b74b","Type":"ContainerStarted","Data":"19f40ab9f625d6c3d9492dcf650b9f15fee422c9f19c0615c835bf5cf73f1843"} Jan 23 11:14:32 crc kubenswrapper[4689]: I0123 11:14:32.128528 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"343d44be-4dcf-4047-beef-a1603131b74b","Type":"ContainerStarted","Data":"741467fa27dd106f27511df13a8ed18618f20ae9d94e994169294ce5ff881371"} Jan 23 11:14:32 crc kubenswrapper[4689]: I0123 11:14:32.129101 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:32 crc kubenswrapper[4689]: I0123 11:14:32.150477 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.15045643 podStartE2EDuration="2.15045643s" podCreationTimestamp="2026-01-23 11:14:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:32.147856456 +0000 UTC m=+1536.772536315" watchObservedRunningTime="2026-01-23 11:14:32.15045643 +0000 UTC m=+1536.775136289" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.211521 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerID="2711514aa6908c824c79ac2d5c1adee31b78e730d11d918fbb6a18fa25dcdbd8" exitCode=0 Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.211804 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerDied","Data":"2711514aa6908c824c79ac2d5c1adee31b78e730d11d918fbb6a18fa25dcdbd8"} Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.432019 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.565375 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.565447 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.565478 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.565566 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.566048 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.566425 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ptvx\" (UniqueName: \"kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.566485 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.566576 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd\") pod \"6b1c9501-a666-49c8-b73f-f41896e26ecf\" (UID: \"6b1c9501-a666-49c8-b73f-f41896e26ecf\") " Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.567208 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.567282 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.572117 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts" (OuterVolumeSpecName: "scripts") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.590095 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx" (OuterVolumeSpecName: "kube-api-access-4ptvx") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "kube-api-access-4ptvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.627353 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.669225 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ptvx\" (UniqueName: \"kubernetes.io/projected/6b1c9501-a666-49c8-b73f-f41896e26ecf-kube-api-access-4ptvx\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.670329 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.670371 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6b1c9501-a666-49c8-b73f-f41896e26ecf-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.670385 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.694337 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.708330 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data" (OuterVolumeSpecName: "config-data") pod "6b1c9501-a666-49c8-b73f-f41896e26ecf" (UID: "6b1c9501-a666-49c8-b73f-f41896e26ecf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.772983 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:38 crc kubenswrapper[4689]: I0123 11:14:38.773759 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b1c9501-a666-49c8-b73f-f41896e26ecf-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.265893 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6b1c9501-a666-49c8-b73f-f41896e26ecf","Type":"ContainerDied","Data":"06482ea4c3dd7567a9631756966501c6b81eab1be830aaa34f31503255258f45"} Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.265945 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.265959 4689 scope.go:117] "RemoveContainer" containerID="0828aeeefc076e6977b80931868e49a510283ccf57f4a605aa943546ef3e4fe3" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.298058 4689 scope.go:117] "RemoveContainer" containerID="c825dd92eb488347487976865a8d80b8cc184d7f6a0632af4416f95c086a85ca" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.324274 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.327818 4689 scope.go:117] "RemoveContainer" containerID="7ba6d9544c60f64bd65b2fef0a86b7c3da9da12efdf1572936e737f3c53eecff" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.341278 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.357469 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:39 crc kubenswrapper[4689]: E0123 11:14:39.358212 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-central-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358245 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-central-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: E0123 11:14:39.358295 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="proxy-httpd" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358308 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="proxy-httpd" Jan 23 11:14:39 crc kubenswrapper[4689]: E0123 11:14:39.358340 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-notification-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358352 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-notification-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: E0123 11:14:39.358377 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="sg-core" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358389 4689 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="sg-core" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358737 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-notification-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358787 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="ceilometer-central-agent" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358812 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="proxy-httpd" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.358832 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" containerName="sg-core" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.367128 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.367263 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.369974 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.370255 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.375093 4689 scope.go:117] "RemoveContainer" containerID="2711514aa6908c824c79ac2d5c1adee31b78e730d11d918fbb6a18fa25dcdbd8" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.493001 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.493518 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x4qn\" (UniqueName: \"kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.493802 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.493869 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.493978 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.494452 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.494614 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597106 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597247 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x4qn\" (UniqueName: \"kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597366 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597408 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597464 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597607 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597682 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597869 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd\") pod \"ceilometer-0\" (UID: 
\"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.597606 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.603816 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.604118 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.605910 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.611776 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.616982 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x4qn\" (UniqueName: \"kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn\") pod \"ceilometer-0\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " pod="openstack/ceilometer-0" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.665086 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b1c9501-a666-49c8-b73f-f41896e26ecf" path="/var/lib/kubelet/pods/6b1c9501-a666-49c8-b73f-f41896e26ecf/volumes" Jan 23 11:14:39 crc kubenswrapper[4689]: I0123 11:14:39.694075 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:40 crc kubenswrapper[4689]: I0123 11:14:40.201650 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:40 crc kubenswrapper[4689]: I0123 11:14:40.281484 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerStarted","Data":"a4674148a5764b2d3b7165c3ca78f97152be2d99050ff80db89e18f1f76954f4"} Jan 23 11:14:40 crc kubenswrapper[4689]: I0123 11:14:40.577287 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 23 11:14:40 crc kubenswrapper[4689]: I0123 11:14:40.672041 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.150525 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-jl9nk"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.152077 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.156010 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.156692 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.232574 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-jl9nk"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.252789 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.253068 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.253239 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn2pt\" (UniqueName: \"kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.253387 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.314602 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerStarted","Data":"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6"} Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.354203 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.354283 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.354384 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn2pt\" (UniqueName: \"kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.354437 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.360882 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.364019 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.365066 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.375633 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.375821 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.376020 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.393718 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn2pt\" (UniqueName: \"kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt\") pod \"nova-cell0-cell-mapping-jl9nk\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.415240 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.475894 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.477323 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.483664 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.484251 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.534473 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.564235 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.564611 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9mk9\" (UniqueName: \"kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.567161 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.567345 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.610692 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.612627 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.634957 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.646224 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:14:41 crc kubenswrapper[4689]: E0123 11:14:41.646643 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.684121 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.684234 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gn74\" (UniqueName: \"kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.691321 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.691429 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.691576 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.691674 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.691719 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9mk9\" (UniqueName: \"kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 
11:14:41.694742 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.733203 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.737558 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9mk9\" (UniqueName: \"kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.762262 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data\") pod \"nova-api-0\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.781886 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793121 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p8jg\" (UniqueName: \"kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793198 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793260 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793288 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gn74\" (UniqueName: \"kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793348 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793401 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.793417 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.795866 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.795904 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.795919 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.798724 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.798788 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.802267 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.802644 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.802874 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.810859 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.823292 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gn74\" (UniqueName: \"kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74\") pod \"nova-cell1-novncproxy-0\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.855243 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.897442 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6p8jg\" (UniqueName: \"kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.897511 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.897663 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.897730 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.898458 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.911231 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.912512 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.931889 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6p8jg\" (UniqueName: \"kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg\") pod \"nova-metadata-0\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " pod="openstack/nova-metadata-0" Jan 23 11:14:41 crc kubenswrapper[4689]: I0123 11:14:41.973899 4689 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.001821 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.001872 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.001925 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.001962 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.002101 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.002124 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvr49\" (UniqueName: \"kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.002195 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.002306 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.002324 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4p4l\" (UniqueName: 
\"kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.061447 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105364 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105416 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvr49\" (UniqueName: \"kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105474 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105583 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105614 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4p4l\" (UniqueName: \"kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105657 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105692 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105740 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.105781 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.106881 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.106996 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.107019 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.107119 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.107656 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.126811 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4p4l\" (UniqueName: \"kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.129918 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.135565 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.136100 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvr49\" (UniqueName: \"kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49\") pod \"dnsmasq-dns-7877d89589-pf8mb\" (UID: 
\"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.157018 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.184169 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.256831 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-jl9nk"] Jan 23 11:14:42 crc kubenswrapper[4689]: W0123 11:14:42.320067 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2f42c42_b409_4a81_ae96_5b5a3b62263f.slice/crio-84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458 WatchSource:0}: Error finding container 84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458: Status 404 returned error can't find the container with id 84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458 Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.377126 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerStarted","Data":"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b"} Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.542039 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.865278 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:14:42 crc kubenswrapper[4689]: I0123 11:14:42.885330 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:42 crc kubenswrapper[4689]: W0123 11:14:42.899256 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55e23846_09dc_40f5_9f13_153651b9b46c.slice/crio-a627f95f9873e9bb93c2edc15ee9a2801c2b385dd976b0846b0e82cc5c3ae7ca WatchSource:0}: Error finding container a627f95f9873e9bb93c2edc15ee9a2801c2b385dd976b0846b0e82cc5c3ae7ca: Status 404 returned error can't find the container with id a627f95f9873e9bb93c2edc15ee9a2801c2b385dd976b0846b0e82cc5c3ae7ca Jan 23 11:14:42 crc kubenswrapper[4689]: W0123 11:14:42.900018 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod120cf4c8_6188_4715_b5f3_87c3e49e359b.slice/crio-1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03 WatchSource:0}: Error finding container 1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03: Status 404 returned error can't find the container with id 1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03 Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.118748 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.128868 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.435398 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6lvmx"] Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.440402 4689 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.451996 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.452231 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.460760 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6lvmx"] Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.467316 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b9b62f22-f147-40e1-a4fc-cd548ce3c065","Type":"ContainerStarted","Data":"0ef47879e4caf64fba99cbf9b56e01897090f640ba02e02e088b1c94526c5cf8"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.500115 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"120cf4c8-6188-4715-b5f3-87c3e49e359b","Type":"ContainerStarted","Data":"1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.504777 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.504873 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.504901 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc9zq\" (UniqueName: \"kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.504970 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.530706 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerStarted","Data":"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.540928 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-jl9nk" event={"ID":"f2f42c42-b409-4a81-ae96-5b5a3b62263f","Type":"ContainerStarted","Data":"12b802b621f6682c2aa84ae6f8e224746e9d3e7a66592d4238bb02f54e31058c"} Jan 23 11:14:43 crc 
kubenswrapper[4689]: I0123 11:14:43.540990 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-jl9nk" event={"ID":"f2f42c42-b409-4a81-ae96-5b5a3b62263f","Type":"ContainerStarted","Data":"84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.550124 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerStarted","Data":"a627f95f9873e9bb93c2edc15ee9a2801c2b385dd976b0846b0e82cc5c3ae7ca"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.553655 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" event={"ID":"0ba414e6-e845-4fe5-9f63-664e783ba9f0","Type":"ContainerStarted","Data":"93ae80bcd44e00b58151b0474f2a34cb35c3e6c2708280e6cf11efcbfb7018d4"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.556301 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerStarted","Data":"67627ce2d9f8c03b8e0f0df494119306c80a881f80067dab6a0d1cc2e7e1b263"} Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.571939 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-jl9nk" podStartSLOduration=2.571923123 podStartE2EDuration="2.571923123s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:43.561857182 +0000 UTC m=+1548.186537041" watchObservedRunningTime="2026-01-23 11:14:43.571923123 +0000 UTC m=+1548.196602982" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.607549 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.607629 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.607710 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc9zq\" (UniqueName: \"kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.607758 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.613759 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.623522 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc9zq\" (UniqueName: \"kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.624566 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.627535 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts\") pod \"nova-cell1-conductor-db-sync-6lvmx\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:43 crc kubenswrapper[4689]: I0123 11:14:43.799864 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:44 crc kubenswrapper[4689]: I0123 11:14:44.576607 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerID="d40a99b4157af27855f036cb1824f91cb317ed664f4a61c777d1fccf82f0b8fb" exitCode=0 Jan 23 11:14:44 crc kubenswrapper[4689]: I0123 11:14:44.576855 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" event={"ID":"0ba414e6-e845-4fe5-9f63-664e783ba9f0","Type":"ContainerDied","Data":"d40a99b4157af27855f036cb1824f91cb317ed664f4a61c777d1fccf82f0b8fb"} Jan 23 11:14:44 crc kubenswrapper[4689]: I0123 11:14:44.924915 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6lvmx"] Jan 23 11:14:45 crc kubenswrapper[4689]: I0123 11:14:45.033703 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:14:45 crc kubenswrapper[4689]: I0123 11:14:45.045879 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:45 crc kubenswrapper[4689]: I0123 11:14:45.594680 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" event={"ID":"b1a086a5-30c9-425a-9cc8-bfc7ff439d23","Type":"ContainerStarted","Data":"9dbe739de7249b376c002714c81d1ccba141dc708ea171a02e7201824fc38087"} Jan 23 11:14:46 crc kubenswrapper[4689]: I0123 11:14:46.607740 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" event={"ID":"0ba414e6-e845-4fe5-9f63-664e783ba9f0","Type":"ContainerStarted","Data":"e4d243ffbd82cfab70c1b4a2d90c9394f6ee95016744e18cf204fde54906c2e1"} Jan 23 11:14:46 crc kubenswrapper[4689]: I0123 11:14:46.608289 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:46 crc kubenswrapper[4689]: I0123 11:14:46.611722 4689 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" event={"ID":"b1a086a5-30c9-425a-9cc8-bfc7ff439d23","Type":"ContainerStarted","Data":"ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a"} Jan 23 11:14:46 crc kubenswrapper[4689]: I0123 11:14:46.632548 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" podStartSLOduration=5.632528767 podStartE2EDuration="5.632528767s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:46.626695712 +0000 UTC m=+1551.251375581" watchObservedRunningTime="2026-01-23 11:14:46.632528767 +0000 UTC m=+1551.257208626" Jan 23 11:14:46 crc kubenswrapper[4689]: I0123 11:14:46.650882 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" podStartSLOduration=3.650863244 podStartE2EDuration="3.650863244s" podCreationTimestamp="2026-01-23 11:14:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:46.645461729 +0000 UTC m=+1551.270141598" watchObservedRunningTime="2026-01-23 11:14:46.650863244 +0000 UTC m=+1551.275543103" Jan 23 11:14:49 crc kubenswrapper[4689]: I0123 11:14:49.655388 4689 generic.go:334] "Generic (PLEG): container finished" podID="b57b3805-c785-43e0-a8f2-6bab72916aa4" containerID="d3dceed73ed2632c55b101e45e515ae5a53090607fdbb257cc86516383108f3c" exitCode=137 Jan 23 11:14:49 crc kubenswrapper[4689]: I0123 11:14:49.655683 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8f8c96b8-rjrg7" event={"ID":"b57b3805-c785-43e0-a8f2-6bab72916aa4","Type":"ContainerDied","Data":"d3dceed73ed2632c55b101e45e515ae5a53090607fdbb257cc86516383108f3c"} Jan 23 11:14:49 crc kubenswrapper[4689]: I0123 11:14:49.660159 4689 generic.go:334] "Generic (PLEG): container finished" podID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" containerID="56722c79a0b7c9ccf3206e72f496e3327caaeeb8d980d5807fdf0bfe06565079" exitCode=137 Jan 23 11:14:49 crc kubenswrapper[4689]: I0123 11:14:49.660201 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" event={"ID":"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b","Type":"ContainerDied","Data":"56722c79a0b7c9ccf3206e72f496e3327caaeeb8d980d5807fdf0bfe06565079"} Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.731688 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.777461 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.793369 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle\") pod \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.793659 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data\") pod \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.793745 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lsdlp\" (UniqueName: \"kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp\") pod \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.793791 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom\") pod \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\" (UID: \"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.813489 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp" (OuterVolumeSpecName: "kube-api-access-lsdlp") pod "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" (UID: "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b"). InnerVolumeSpecName "kube-api-access-lsdlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.836568 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" (UID: "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.891923 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" (UID: "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.897032 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhztb\" (UniqueName: \"kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb\") pod \"b57b3805-c785-43e0-a8f2-6bab72916aa4\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.897100 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data\") pod \"b57b3805-c785-43e0-a8f2-6bab72916aa4\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.897182 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom\") pod \"b57b3805-c785-43e0-a8f2-6bab72916aa4\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.897489 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle\") pod \"b57b3805-c785-43e0-a8f2-6bab72916aa4\" (UID: \"b57b3805-c785-43e0-a8f2-6bab72916aa4\") " Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.901956 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.901992 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lsdlp\" (UniqueName: \"kubernetes.io/projected/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-kube-api-access-lsdlp\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.902003 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.903164 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb" (OuterVolumeSpecName: "kube-api-access-nhztb") pod "b57b3805-c785-43e0-a8f2-6bab72916aa4" (UID: "b57b3805-c785-43e0-a8f2-6bab72916aa4"). InnerVolumeSpecName "kube-api-access-nhztb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.903516 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b57b3805-c785-43e0-a8f2-6bab72916aa4" (UID: "b57b3805-c785-43e0-a8f2-6bab72916aa4"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:50 crc kubenswrapper[4689]: I0123 11:14:50.939410 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data" (OuterVolumeSpecName: "config-data") pod "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" (UID: "c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.004377 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhztb\" (UniqueName: \"kubernetes.io/projected/b57b3805-c785-43e0-a8f2-6bab72916aa4-kube-api-access-nhztb\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.004427 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.004440 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.006396 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b57b3805-c785-43e0-a8f2-6bab72916aa4" (UID: "b57b3805-c785-43e0-a8f2-6bab72916aa4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.022339 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data" (OuterVolumeSpecName: "config-data") pod "b57b3805-c785-43e0-a8f2-6bab72916aa4" (UID: "b57b3805-c785-43e0-a8f2-6bab72916aa4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.106046 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.106080 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57b3805-c785-43e0-a8f2-6bab72916aa4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.699850 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b9b62f22-f147-40e1-a4fc-cd548ce3c065","Type":"ContainerStarted","Data":"8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.701625 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"120cf4c8-6188-4715-b5f3-87c3e49e359b","Type":"ContainerStarted","Data":"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.701756 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="120cf4c8-6188-4715-b5f3-87c3e49e359b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.707804 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerStarted","Data":"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.708015 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-central-agent" containerID="cri-o://b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.708290 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.708340 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="proxy-httpd" containerID="cri-o://53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.708415 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="sg-core" containerID="cri-o://339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.708459 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-notification-agent" containerID="cri-o://bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.719049 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" event={"ID":"c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b","Type":"ContainerDied","Data":"7845099d8b66a18e2445cc4bdd1723b0fe617f0b1b9da9db6647b83f0237e5b7"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.719096 4689 scope.go:117] "RemoveContainer" containerID="56722c79a0b7c9ccf3206e72f496e3327caaeeb8d980d5807fdf0bfe06565079" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.719272 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6dbc994bcd-6lt7s" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.719350 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.603949015 podStartE2EDuration="10.719328513s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="2026-01-23 11:14:43.129244264 +0000 UTC m=+1547.753924113" lastFinishedPulling="2026-01-23 11:14:50.244623742 +0000 UTC m=+1554.869303611" observedRunningTime="2026-01-23 11:14:51.713097278 +0000 UTC m=+1556.337777137" watchObservedRunningTime="2026-01-23 11:14:51.719328513 +0000 UTC m=+1556.344008372" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.732098 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerStarted","Data":"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.732187 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerStarted","Data":"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.732270 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-metadata" containerID="cri-o://0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.732380 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-log" containerID="cri-o://10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" gracePeriod=30 Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.738638 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.446557334 podStartE2EDuration="10.738622854s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="2026-01-23 11:14:42.912317549 +0000 UTC m=+1547.536997418" lastFinishedPulling="2026-01-23 11:14:50.204383079 +0000 UTC m=+1554.829062938" observedRunningTime="2026-01-23 11:14:51.736624004 +0000 UTC m=+1556.361303863" watchObservedRunningTime="2026-01-23 11:14:51.738622854 +0000 UTC m=+1556.363302713" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.752522 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7b8f8c96b8-rjrg7" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.753599 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8f8c96b8-rjrg7" event={"ID":"b57b3805-c785-43e0-a8f2-6bab72916aa4","Type":"ContainerDied","Data":"2fbb2b60d11819e4c9949c8ad4a6434b0c6b806dd6206a21648bf0306cf503ee"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.753656 4689 scope.go:117] "RemoveContainer" containerID="d3dceed73ed2632c55b101e45e515ae5a53090607fdbb257cc86516383108f3c" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.795046 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerStarted","Data":"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.795422 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerStarted","Data":"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276"} Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.813627 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.827443399 podStartE2EDuration="12.813605852s" podCreationTimestamp="2026-01-23 11:14:39 +0000 UTC" firstStartedPulling="2026-01-23 11:14:40.202254228 +0000 UTC m=+1544.826934087" lastFinishedPulling="2026-01-23 11:14:50.188416681 +0000 UTC m=+1554.813096540" observedRunningTime="2026-01-23 11:14:51.776049186 +0000 UTC m=+1556.400729045" watchObservedRunningTime="2026-01-23 11:14:51.813605852 +0000 UTC m=+1556.438285711" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.812129 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.816354 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.831375 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.489920013 podStartE2EDuration="10.831351034s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="2026-01-23 11:14:42.903195671 +0000 UTC m=+1547.527875530" lastFinishedPulling="2026-01-23 11:14:50.244626702 +0000 UTC m=+1554.869306551" observedRunningTime="2026-01-23 11:14:51.795690165 +0000 UTC m=+1556.420370024" watchObservedRunningTime="2026-01-23 11:14:51.831351034 +0000 UTC m=+1556.456030893" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.974202 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.974570 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.987959 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.320942245 podStartE2EDuration="10.987940436s" podCreationTimestamp="2026-01-23 11:14:41 +0000 UTC" firstStartedPulling="2026-01-23 11:14:42.574720848 +0000 UTC m=+1547.199400697" lastFinishedPulling="2026-01-23 11:14:50.241719039 +0000 UTC m=+1554.866398888" observedRunningTime="2026-01-23 11:14:51.836969814 +0000 UTC 
m=+1556.461649683" watchObservedRunningTime="2026-01-23 11:14:51.987940436 +0000 UTC m=+1556.612620295" Jan 23 11:14:51 crc kubenswrapper[4689]: I0123 11:14:51.989218 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-6dbc994bcd-6lt7s"] Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.017208 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.027775 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7b8f8c96b8-rjrg7"] Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.062240 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.062292 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.157827 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.157860 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.187374 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.200847 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.351022 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.351307 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="dnsmasq-dns" containerID="cri-o://d6cee0c9c9a5402c222163856c2e78ceeada861845cebb80204ab765be67b048" gracePeriod=10 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.573419 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.775052 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle\") pod \"55e23846-09dc-40f5-9f13-153651b9b46c\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.775251 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs\") pod \"55e23846-09dc-40f5-9f13-153651b9b46c\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.775302 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data\") pod \"55e23846-09dc-40f5-9f13-153651b9b46c\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.775346 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6p8jg\" (UniqueName: \"kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg\") pod \"55e23846-09dc-40f5-9f13-153651b9b46c\" (UID: \"55e23846-09dc-40f5-9f13-153651b9b46c\") " Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.776678 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs" (OuterVolumeSpecName: "logs") pod "55e23846-09dc-40f5-9f13-153651b9b46c" (UID: "55e23846-09dc-40f5-9f13-153651b9b46c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.801259 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg" (OuterVolumeSpecName: "kube-api-access-6p8jg") pod "55e23846-09dc-40f5-9f13-153651b9b46c" (UID: "55e23846-09dc-40f5-9f13-153651b9b46c"). InnerVolumeSpecName "kube-api-access-6p8jg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.841500 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data" (OuterVolumeSpecName: "config-data") pod "55e23846-09dc-40f5-9f13-153651b9b46c" (UID: "55e23846-09dc-40f5-9f13-153651b9b46c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.841768 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55e23846-09dc-40f5-9f13-153651b9b46c" (UID: "55e23846-09dc-40f5-9f13-153651b9b46c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.861996 4689 generic.go:334] "Generic (PLEG): container finished" podID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerID="d6cee0c9c9a5402c222163856c2e78ceeada861845cebb80204ab765be67b048" exitCode=0 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.862107 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" event={"ID":"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb","Type":"ContainerDied","Data":"d6cee0c9c9a5402c222163856c2e78ceeada861845cebb80204ab765be67b048"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.872893 4689 generic.go:334] "Generic (PLEG): container finished" podID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerID="53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae" exitCode=0 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.872927 4689 generic.go:334] "Generic (PLEG): container finished" podID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerID="339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb" exitCode=2 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.872937 4689 generic.go:334] "Generic (PLEG): container finished" podID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerID="bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b" exitCode=0 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.872993 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerDied","Data":"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.873054 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerDied","Data":"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.873065 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerDied","Data":"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.878927 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.878957 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55e23846-09dc-40f5-9f13-153651b9b46c-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.878967 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e23846-09dc-40f5-9f13-153651b9b46c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.878976 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6p8jg\" (UniqueName: \"kubernetes.io/projected/55e23846-09dc-40f5-9f13-153651b9b46c-kube-api-access-6p8jg\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886343 4689 generic.go:334] "Generic (PLEG): container finished" podID="55e23846-09dc-40f5-9f13-153651b9b46c" 
containerID="0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" exitCode=0 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886365 4689 generic.go:334] "Generic (PLEG): container finished" podID="55e23846-09dc-40f5-9f13-153651b9b46c" containerID="10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" exitCode=143 Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886396 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerDied","Data":"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886419 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerDied","Data":"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886428 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55e23846-09dc-40f5-9f13-153651b9b46c","Type":"ContainerDied","Data":"a627f95f9873e9bb93c2edc15ee9a2801c2b385dd976b0846b0e82cc5c3ae7ca"} Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886442 4689 scope.go:117] "RemoveContainer" containerID="0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.886569 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.906408 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.239:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.906634 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.239:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:14:52 crc kubenswrapper[4689]: I0123 11:14:52.972249 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.002096 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.002816 4689 scope.go:117] "RemoveContainer" containerID="10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.022408 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.023025 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" containerName="heat-cfnapi" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.023038 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" containerName="heat-cfnapi" Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.023756 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57b3805-c785-43e0-a8f2-6bab72916aa4" containerName="heat-api" Jan 23 11:14:53 
crc kubenswrapper[4689]: I0123 11:14:53.023771 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57b3805-c785-43e0-a8f2-6bab72916aa4" containerName="heat-api" Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.023787 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-metadata" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.023794 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-metadata" Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.023824 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-log" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.023830 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-log" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.024375 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-log" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.024401 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57b3805-c785-43e0-a8f2-6bab72916aa4" containerName="heat-api" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.024418 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" containerName="heat-cfnapi" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.024430 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" containerName="nova-metadata-metadata" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.026705 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.028699 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.028940 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.033366 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.038362 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.070465 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.078331 4689 scope.go:117] "RemoveContainer" containerID="0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.081304 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf\": container with ID starting with 0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf not found: ID does not exist" containerID="0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.081358 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf"} err="failed to get container status \"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf\": rpc error: code = NotFound desc = could not find container \"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf\": container with ID starting with 0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf not found: ID does not exist" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.081386 4689 scope.go:117] "RemoveContainer" containerID="10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" Jan 23 11:14:53 crc kubenswrapper[4689]: E0123 11:14:53.083407 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b\": container with ID starting with 10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b not found: ID does not exist" containerID="10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.083453 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b"} err="failed to get container status \"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b\": rpc error: code = NotFound desc = could not find container \"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b\": container with ID starting with 10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b not found: ID does not exist" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.083484 4689 scope.go:117] "RemoveContainer" containerID="0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.084886 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf"} err="failed to get container status \"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf\": rpc error: code = NotFound desc = could not find container \"0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf\": container with ID starting with 0b0253097fcc8b6d8ed11e85b5b894f72df186a13eba864a525a5e0e2980afaf not found: ID does not exist" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.084932 4689 scope.go:117] "RemoveContainer" containerID="10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 
11:14:53.085734 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b"} err="failed to get container status \"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b\": rpc error: code = NotFound desc = could not find container \"10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b\": container with ID starting with 10a2c8d62cadf2c9faed8ef7bfcfa28b545e0b784d3f8252af83cc24ba86281b not found: ID does not exist" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.184893 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.184943 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v2qh\" (UniqueName: \"kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185027 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185088 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185112 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185205 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb\") pod \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\" (UID: \"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185638 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185700 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185778 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185865 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sxpd\" (UniqueName: \"kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.185900 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.206595 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh" (OuterVolumeSpecName: "kube-api-access-4v2qh") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "kube-api-access-4v2qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.287662 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sxpd\" (UniqueName: \"kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.287702 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.287836 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.287870 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.287925 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.288022 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v2qh\" (UniqueName: 
\"kubernetes.io/projected/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-kube-api-access-4v2qh\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.288508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.301544 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.305360 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.307873 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sxpd\" (UniqueName: \"kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.313819 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.329506 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.352190 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config" (OuterVolumeSpecName: "config") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.366921 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.382730 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.383623 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" (UID: "67be6fe3-b5ef-4a4c-80b2-7be74e8391fb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.390948 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.390993 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.391010 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.391022 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.391035 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.394483 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.443358 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.596970 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597388 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597425 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597545 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597616 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5x4qn\" (UniqueName: \"kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597665 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.597806 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml\") pod \"54b8e2d1-a39d-4941-a20f-3fef827df232\" (UID: \"54b8e2d1-a39d-4941-a20f-3fef827df232\") " Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.600613 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.603537 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.603652 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts" (OuterVolumeSpecName: "scripts") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.624190 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn" (OuterVolumeSpecName: "kube-api-access-5x4qn") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "kube-api-access-5x4qn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.653316 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.661095 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55e23846-09dc-40f5-9f13-153651b9b46c" path="/var/lib/kubelet/pods/55e23846-09dc-40f5-9f13-153651b9b46c/volumes" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.661965 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57b3805-c785-43e0-a8f2-6bab72916aa4" path="/var/lib/kubelet/pods/b57b3805-c785-43e0-a8f2-6bab72916aa4/volumes" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.662698 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b" path="/var/lib/kubelet/pods/c9366d7f-a9ba-41f8-a1e3-2d67fd41c48b/volumes" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.700828 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5x4qn\" (UniqueName: \"kubernetes.io/projected/54b8e2d1-a39d-4941-a20f-3fef827df232-kube-api-access-5x4qn\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.700866 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.700876 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.700884 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.700895 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54b8e2d1-a39d-4941-a20f-3fef827df232-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.791377 4689 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.802983 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.807890 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data" (OuterVolumeSpecName: "config-data") pod "54b8e2d1-a39d-4941-a20f-3fef827df232" (UID: "54b8e2d1-a39d-4941-a20f-3fef827df232"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.892540 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:53 crc kubenswrapper[4689]: W0123 11:14:53.895299 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2 WatchSource:0}: Error finding container 3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2: Status 404 returned error can't find the container with id 3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2 Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.912596 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b8e2d1-a39d-4941-a20f-3fef827df232-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.921602 4689 generic.go:334] "Generic (PLEG): container finished" podID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerID="b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6" exitCode=0 Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.921666 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.921684 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerDied","Data":"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6"} Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.922505 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54b8e2d1-a39d-4941-a20f-3fef827df232","Type":"ContainerDied","Data":"a4674148a5764b2d3b7165c3ca78f97152be2d99050ff80db89e18f1f76954f4"} Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.922539 4689 scope.go:117] "RemoveContainer" containerID="53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.927383 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerStarted","Data":"3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2"} Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.929962 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" event={"ID":"67be6fe3-b5ef-4a4c-80b2-7be74e8391fb","Type":"ContainerDied","Data":"6d6b7229f90af3194b531b2a5b9afcc44509530d68fd00fa602f267cd6693eb8"} Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.930079 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d978555f9-bkg4n" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.965780 4689 scope.go:117] "RemoveContainer" containerID="339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb" Jan 23 11:14:53 crc kubenswrapper[4689]: I0123 11:14:53.974743 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.008967 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d978555f9-bkg4n"] Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.026295 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.029243 4689 scope.go:117] "RemoveContainer" containerID="bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.051396 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.066539 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.066989 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="init" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067002 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="init" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.067019 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="sg-core" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067026 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="sg-core" Jan 23 11:14:54 crc kubenswrapper[4689]: 
E0123 11:14:54.067038 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-central-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067044 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-central-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.067052 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="dnsmasq-dns" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067057 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="dnsmasq-dns" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.067070 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="proxy-httpd" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067076 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="proxy-httpd" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.067100 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-notification-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067106 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-notification-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067325 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-notification-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067336 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="sg-core" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067348 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="proxy-httpd" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067364 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" containerName="dnsmasq-dns" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.067374 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" containerName="ceilometer-central-agent" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.069297 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.069428 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.071350 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.071770 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.114714 4689 scope.go:117] "RemoveContainer" containerID="b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.118925 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.118973 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.119003 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.119066 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.119140 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.119196 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9srtn\" (UniqueName: \"kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.119237 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.221563 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc 
kubenswrapper[4689]: I0123 11:14:54.221633 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.221728 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.221834 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.221887 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9srtn\" (UniqueName: \"kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.221939 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.222016 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.223072 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.223086 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.228652 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.228988 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.229323 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.237972 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.244349 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9srtn\" (UniqueName: \"kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn\") pod \"ceilometer-0\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.404163 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.406477 4689 scope.go:117] "RemoveContainer" containerID="53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.406881 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae\": container with ID starting with 53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae not found: ID does not exist" containerID="53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.406927 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae"} err="failed to get container status \"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae\": rpc error: code = NotFound desc = could not find container \"53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae\": container with ID starting with 53929f88c34824375d0e70ad79dacc7b517ff549e2bfd6baca861fcd6e93c3ae not found: ID does not exist" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.406954 4689 scope.go:117] "RemoveContainer" containerID="339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.407265 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb\": container with ID starting with 339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb not found: ID does not exist" containerID="339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407307 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb"} err="failed to get container status \"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb\": rpc error: code = NotFound desc = could not find container \"339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb\": container with ID starting with 
339f6e837a224e98534d245e5385e240a2078f223b74fd710b13e7f21fd501fb not found: ID does not exist" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407343 4689 scope.go:117] "RemoveContainer" containerID="bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.407577 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b\": container with ID starting with bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b not found: ID does not exist" containerID="bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407604 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b"} err="failed to get container status \"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b\": rpc error: code = NotFound desc = could not find container \"bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b\": container with ID starting with bbeeb0b29bc4399b1141684566507cbe7ca2d0a8a2907cf1205aa6a9676ab08b not found: ID does not exist" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407619 4689 scope.go:117] "RemoveContainer" containerID="b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6" Jan 23 11:14:54 crc kubenswrapper[4689]: E0123 11:14:54.407827 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6\": container with ID starting with b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6 not found: ID does not exist" containerID="b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407855 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6"} err="failed to get container status \"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6\": rpc error: code = NotFound desc = could not find container \"b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6\": container with ID starting with b5a230949bd9ef0e68da8f05c1564c7952818e5956ade35b01f12a82342384b6 not found: ID does not exist" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.407872 4689 scope.go:117] "RemoveContainer" containerID="d6cee0c9c9a5402c222163856c2e78ceeada861845cebb80204ab765be67b048" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.442915 4689 scope.go:117] "RemoveContainer" containerID="cc197ab7b0e78dffca3a2549bb5f38538c3bd4df8d00abf5259344da36c8f7ba" Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.940458 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerStarted","Data":"61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514"} Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.940827 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerStarted","Data":"f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842"} Jan 23 11:14:54 crc 
Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.945916 4689 generic.go:334] "Generic (PLEG): container finished" podID="f2f42c42-b409-4a81-ae96-5b5a3b62263f" containerID="12b802b621f6682c2aa84ae6f8e224746e9d3e7a66592d4238bb02f54e31058c" exitCode=0
Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.946345 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-jl9nk" event={"ID":"f2f42c42-b409-4a81-ae96-5b5a3b62263f","Type":"ContainerDied","Data":"12b802b621f6682c2aa84ae6f8e224746e9d3e7a66592d4238bb02f54e31058c"}
Jan 23 11:14:54 crc kubenswrapper[4689]: W0123 11:14:54.953036 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaa2f2f0_0618_4f1f_97bd_8301459d3fd7.slice/crio-3bc59efe95cba59454e4783d3c06b29062bf77d692913d20159ec3fa65229996 WatchSource:0}: Error finding container 3bc59efe95cba59454e4783d3c06b29062bf77d692913d20159ec3fa65229996: Status 404 returned error can't find the container with id 3bc59efe95cba59454e4783d3c06b29062bf77d692913d20159ec3fa65229996
Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.955531 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 11:14:54 crc kubenswrapper[4689]: I0123 11:14:54.969724 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.969706575 podStartE2EDuration="2.969706575s" podCreationTimestamp="2026-01-23 11:14:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:54.966512676 +0000 UTC m=+1559.591192535" watchObservedRunningTime="2026-01-23 11:14:54.969706575 +0000 UTC m=+1559.594386434"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.689914 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:14:55 crc kubenswrapper[4689]: E0123 11:14:55.707604 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.719080 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54b8e2d1-a39d-4941-a20f-3fef827df232" path="/var/lib/kubelet/pods/54b8e2d1-a39d-4941-a20f-3fef827df232/volumes"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.720207 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67be6fe3-b5ef-4a4c-80b2-7be74e8391fb" path="/var/lib/kubelet/pods/67be6fe3-b5ef-4a4c-80b2-7be74e8391fb/volumes"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.839048 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-1b36-account-create-update-k4c7s"]
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.842672 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1b36-account-create-update-k4c7s"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.845044 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.864316 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-x9w8s"]
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.866037 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-x9w8s"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.879264 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1b36-account-create-update-k4c7s"]
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.896211 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-x9w8s"]
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.963186 4689 generic.go:334] "Generic (PLEG): container finished" podID="b1a086a5-30c9-425a-9cc8-bfc7ff439d23" containerID="ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a" exitCode=0
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.963846 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" event={"ID":"b1a086a5-30c9-425a-9cc8-bfc7ff439d23","Type":"ContainerDied","Data":"ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a"}
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.966332 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq8q8\" (UniqueName: \"kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.966378 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s"
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.966542 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerStarted","Data":"0ec0bfff9fdb26b7911c040e3934a635b1dc984c7d8efe5eb340f531ab20a473"}
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.966654 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerStarted","Data":"3bc59efe95cba59454e4783d3c06b29062bf77d692913d20159ec3fa65229996"}
Jan 23 11:14:55 crc kubenswrapper[4689]: I0123 11:14:55.967141 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq9f5\" (UniqueName: \"kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s"
\"kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.069441 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.069514 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq8q8\" (UniqueName: \"kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.069577 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.069836 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq9f5\" (UniqueName: \"kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.070269 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.071008 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.105751 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq9f5\" (UniqueName: \"kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5\") pod \"aodh-db-create-x9w8s\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " pod="openstack/aodh-db-create-x9w8s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.106378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq8q8\" (UniqueName: \"kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8\") pod \"aodh-1b36-account-create-update-k4c7s\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.200773 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.207539 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-x9w8s" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.466033 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.582262 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data\") pod \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.582947 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle\") pod \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.583081 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts\") pod \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.583137 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dn2pt\" (UniqueName: \"kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt\") pod \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\" (UID: \"f2f42c42-b409-4a81-ae96-5b5a3b62263f\") " Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.588875 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt" (OuterVolumeSpecName: "kube-api-access-dn2pt") pod "f2f42c42-b409-4a81-ae96-5b5a3b62263f" (UID: "f2f42c42-b409-4a81-ae96-5b5a3b62263f"). InnerVolumeSpecName "kube-api-access-dn2pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.591486 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts" (OuterVolumeSpecName: "scripts") pod "f2f42c42-b409-4a81-ae96-5b5a3b62263f" (UID: "f2f42c42-b409-4a81-ae96-5b5a3b62263f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.629462 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data" (OuterVolumeSpecName: "config-data") pod "f2f42c42-b409-4a81-ae96-5b5a3b62263f" (UID: "f2f42c42-b409-4a81-ae96-5b5a3b62263f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.649588 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2f42c42-b409-4a81-ae96-5b5a3b62263f" (UID: "f2f42c42-b409-4a81-ae96-5b5a3b62263f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.688976 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.689008 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.689050 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dn2pt\" (UniqueName: \"kubernetes.io/projected/f2f42c42-b409-4a81-ae96-5b5a3b62263f-kube-api-access-dn2pt\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.689275 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2f42c42-b409-4a81-ae96-5b5a3b62263f-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.737680 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-x9w8s"] Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.760079 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1b36-account-create-update-k4c7s"] Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.977795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-x9w8s" event={"ID":"007e6c9b-000b-4516-9caf-a5c8204515eb","Type":"ContainerStarted","Data":"d4ac9565f628f614399f6dccc953e3b17707907a5f4101691334b7f8d5f5f5ca"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.977840 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-x9w8s" event={"ID":"007e6c9b-000b-4516-9caf-a5c8204515eb","Type":"ContainerStarted","Data":"20848b067e725a93567e57aa3bbd3b65e44f2841c365da70b56aa8582677b8db"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.981493 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerStarted","Data":"6bdb06fe70b6db3b72df82f7be0e104e39fb0602bb0614af52d4dca89caae423"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.984364 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1b36-account-create-update-k4c7s" event={"ID":"9787cfb5-0f46-4f88-a9e2-608370561edb","Type":"ContainerStarted","Data":"860b2e60837910ebdb4dcf0d77734726303ed567543edfcf085856979f25d8e4"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.984399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1b36-account-create-update-k4c7s" event={"ID":"9787cfb5-0f46-4f88-a9e2-608370561edb","Type":"ContainerStarted","Data":"957064b4cc4c64d599cccd4e8968b9184869fc7164aa583c696b5e76c7ca9b3e"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.988884 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-jl9nk" Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.988901 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-jl9nk" event={"ID":"f2f42c42-b409-4a81-ae96-5b5a3b62263f","Type":"ContainerDied","Data":"84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458"} Jan 23 11:14:56 crc kubenswrapper[4689]: I0123 11:14:56.989079 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.013021 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-x9w8s" podStartSLOduration=2.012997704 podStartE2EDuration="2.012997704s" podCreationTimestamp="2026-01-23 11:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:57.001926968 +0000 UTC m=+1561.626606827" watchObservedRunningTime="2026-01-23 11:14:57.012997704 +0000 UTC m=+1561.637677563" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.029884 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-1b36-account-create-update-k4c7s" podStartSLOduration=2.029867114 podStartE2EDuration="2.029867114s" podCreationTimestamp="2026-01-23 11:14:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:14:57.020938261 +0000 UTC m=+1561.645618120" watchObservedRunningTime="2026-01-23 11:14:57.029867114 +0000 UTC m=+1561.654546973" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.146481 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.146757 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-log" containerID="cri-o://20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276" gracePeriod=30 Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.147307 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-api" containerID="cri-o://712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4" gracePeriod=30 Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.171952 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.172223 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerName="nova-scheduler-scheduler" containerID="cri-o://8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" gracePeriod=30 Jan 23 11:14:57 crc kubenswrapper[4689]: E0123 11:14:57.187311 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:14:57 crc kubenswrapper[4689]: E0123 11:14:57.189157 4689 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:14:57 crc kubenswrapper[4689]: E0123 11:14:57.192033 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:14:57 crc kubenswrapper[4689]: E0123 11:14:57.192084 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerName="nova-scheduler-scheduler" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.196946 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.197235 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-log" containerID="cri-o://f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842" gracePeriod=30 Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.197783 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-metadata" containerID="cri-o://61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514" gracePeriod=30 Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.500247 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.611835 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc9zq\" (UniqueName: \"kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq\") pod \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.611954 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts\") pod \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.612073 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data\") pod \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.612157 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle\") pod \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\" (UID: \"b1a086a5-30c9-425a-9cc8-bfc7ff439d23\") " Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.617616 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts" (OuterVolumeSpecName: "scripts") pod "b1a086a5-30c9-425a-9cc8-bfc7ff439d23" (UID: "b1a086a5-30c9-425a-9cc8-bfc7ff439d23"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.624464 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq" (OuterVolumeSpecName: "kube-api-access-vc9zq") pod "b1a086a5-30c9-425a-9cc8-bfc7ff439d23" (UID: "b1a086a5-30c9-425a-9cc8-bfc7ff439d23"). InnerVolumeSpecName "kube-api-access-vc9zq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.654298 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1a086a5-30c9-425a-9cc8-bfc7ff439d23" (UID: "b1a086a5-30c9-425a-9cc8-bfc7ff439d23"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.664322 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data" (OuterVolumeSpecName: "config-data") pod "b1a086a5-30c9-425a-9cc8-bfc7ff439d23" (UID: "b1a086a5-30c9-425a-9cc8-bfc7ff439d23"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.715871 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.715906 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.715915 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc9zq\" (UniqueName: \"kubernetes.io/projected/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-kube-api-access-vc9zq\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:57 crc kubenswrapper[4689]: I0123 11:14:57.715925 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1a086a5-30c9-425a-9cc8-bfc7ff439d23-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.002104 4689 generic.go:334] "Generic (PLEG): container finished" podID="9787cfb5-0f46-4f88-a9e2-608370561edb" containerID="860b2e60837910ebdb4dcf0d77734726303ed567543edfcf085856979f25d8e4" exitCode=0 Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.002616 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1b36-account-create-update-k4c7s" event={"ID":"9787cfb5-0f46-4f88-a9e2-608370561edb","Type":"ContainerDied","Data":"860b2e60837910ebdb4dcf0d77734726303ed567543edfcf085856979f25d8e4"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.005333 4689 generic.go:334] "Generic (PLEG): container finished" podID="007e6c9b-000b-4516-9caf-a5c8204515eb" containerID="d4ac9565f628f614399f6dccc953e3b17707907a5f4101691334b7f8d5f5f5ca" exitCode=0 Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.005492 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-x9w8s" event={"ID":"007e6c9b-000b-4516-9caf-a5c8204515eb","Type":"ContainerDied","Data":"d4ac9565f628f614399f6dccc953e3b17707907a5f4101691334b7f8d5f5f5ca"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.012198 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" event={"ID":"b1a086a5-30c9-425a-9cc8-bfc7ff439d23","Type":"ContainerDied","Data":"9dbe739de7249b376c002714c81d1ccba141dc708ea171a02e7201824fc38087"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.012238 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9dbe739de7249b376c002714c81d1ccba141dc708ea171a02e7201824fc38087" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.012293 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6lvmx" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.033534 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerStarted","Data":"31f0c2b65a74ac161bbad3b5d41f4382e9cf06395fa6fb3fd13e65b70b151e59"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.050347 4689 generic.go:334] "Generic (PLEG): container finished" podID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerID="20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276" exitCode=143 Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.050451 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerDied","Data":"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.077848 4689 generic.go:334] "Generic (PLEG): container finished" podID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerID="61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514" exitCode=0 Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.077885 4689 generic.go:334] "Generic (PLEG): container finished" podID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerID="f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842" exitCode=143 Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.077908 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerDied","Data":"61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.079892 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerDied","Data":"f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842"} Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.142189 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 11:14:58 crc kubenswrapper[4689]: E0123 11:14:58.142682 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1a086a5-30c9-425a-9cc8-bfc7ff439d23" containerName="nova-cell1-conductor-db-sync" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.142694 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1a086a5-30c9-425a-9cc8-bfc7ff439d23" containerName="nova-cell1-conductor-db-sync" Jan 23 11:14:58 crc kubenswrapper[4689]: E0123 11:14:58.142739 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2f42c42-b409-4a81-ae96-5b5a3b62263f" containerName="nova-manage" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.142744 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2f42c42-b409-4a81-ae96-5b5a3b62263f" containerName="nova-manage" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.142948 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2f42c42-b409-4a81-ae96-5b5a3b62263f" containerName="nova-manage" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.142991 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1a086a5-30c9-425a-9cc8-bfc7ff439d23" containerName="nova-cell1-conductor-db-sync" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.143823 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.148625 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.153900 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.233309 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd6kr\" (UniqueName: \"kubernetes.io/projected/5f00d886-838d-4678-acd0-d917f134dd59-kube-api-access-gd6kr\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.233387 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.233485 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.335408 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.335562 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd6kr\" (UniqueName: \"kubernetes.io/projected/5f00d886-838d-4678-acd0-d917f134dd59-kube-api-access-gd6kr\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.335626 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.340801 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.341909 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f00d886-838d-4678-acd0-d917f134dd59-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.352875 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd6kr\" (UniqueName: \"kubernetes.io/projected/5f00d886-838d-4678-acd0-d917f134dd59-kube-api-access-gd6kr\") pod \"nova-cell1-conductor-0\" (UID: \"5f00d886-838d-4678-acd0-d917f134dd59\") " pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.396879 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.396934 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.515419 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.564724 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.659926 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs\") pod \"9128fe08-a23a-43f9-8fc6-91eed858ba84\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.660309 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sxpd\" (UniqueName: \"kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd\") pod \"9128fe08-a23a-43f9-8fc6-91eed858ba84\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.660344 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs\") pod \"9128fe08-a23a-43f9-8fc6-91eed858ba84\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.660461 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle\") pod \"9128fe08-a23a-43f9-8fc6-91eed858ba84\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.660562 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data\") pod \"9128fe08-a23a-43f9-8fc6-91eed858ba84\" (UID: \"9128fe08-a23a-43f9-8fc6-91eed858ba84\") " Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.665515 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs" (OuterVolumeSpecName: "logs") pod "9128fe08-a23a-43f9-8fc6-91eed858ba84" (UID: "9128fe08-a23a-43f9-8fc6-91eed858ba84"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.673458 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd" (OuterVolumeSpecName: "kube-api-access-7sxpd") pod "9128fe08-a23a-43f9-8fc6-91eed858ba84" (UID: "9128fe08-a23a-43f9-8fc6-91eed858ba84"). 
InnerVolumeSpecName "kube-api-access-7sxpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.751421 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data" (OuterVolumeSpecName: "config-data") pod "9128fe08-a23a-43f9-8fc6-91eed858ba84" (UID: "9128fe08-a23a-43f9-8fc6-91eed858ba84"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.757196 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9128fe08-a23a-43f9-8fc6-91eed858ba84" (UID: "9128fe08-a23a-43f9-8fc6-91eed858ba84"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.765099 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sxpd\" (UniqueName: \"kubernetes.io/projected/9128fe08-a23a-43f9-8fc6-91eed858ba84-kube-api-access-7sxpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.765161 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.765172 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.765182 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9128fe08-a23a-43f9-8fc6-91eed858ba84-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.777961 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9128fe08-a23a-43f9-8fc6-91eed858ba84" (UID: "9128fe08-a23a-43f9-8fc6-91eed858ba84"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:14:58 crc kubenswrapper[4689]: I0123 11:14:58.877447 4689 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9128fe08-a23a-43f9-8fc6-91eed858ba84-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.102284 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerStarted","Data":"8e05a396643386b57b4b4ce124d29bfc25d476720883b87bf06fb5130320d428"} Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.102805 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.113029 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.114311 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9128fe08-a23a-43f9-8fc6-91eed858ba84","Type":"ContainerDied","Data":"3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2"} Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.114390 4689 scope.go:117] "RemoveContainer" containerID="61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.129139 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.291555644 podStartE2EDuration="5.129111686s" podCreationTimestamp="2026-01-23 11:14:54 +0000 UTC" firstStartedPulling="2026-01-23 11:14:54.955757388 +0000 UTC m=+1559.580437247" lastFinishedPulling="2026-01-23 11:14:58.79331343 +0000 UTC m=+1563.417993289" observedRunningTime="2026-01-23 11:14:59.119184109 +0000 UTC m=+1563.743863978" watchObservedRunningTime="2026-01-23 11:14:59.129111686 +0000 UTC m=+1563.753791545" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.164698 4689 scope.go:117] "RemoveContainer" containerID="f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.170357 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.194225 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.205407 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:59 crc kubenswrapper[4689]: E0123 11:14:59.205931 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-metadata" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.205947 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-metadata" Jan 23 11:14:59 crc kubenswrapper[4689]: E0123 11:14:59.205970 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-log" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.205977 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-log" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.206213 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-metadata" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.206231 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" containerName="nova-metadata-log" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.207508 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.218998 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.219229 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.219820 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.280045 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.286899 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9b4t\" (UniqueName: \"kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.286944 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.286996 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.287055 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.287125 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.389740 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.389906 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9b4t\" (UniqueName: \"kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.389945 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.390682 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.390896 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.394524 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.395796 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.396628 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.399894 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:14:59 crc kubenswrapper[4689]: I0123 11:14:59.413160 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9b4t\" (UniqueName: \"kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t\") pod \"nova-metadata-0\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " pod="openstack/nova-metadata-0" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.537635 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.595935 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-x9w8s" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.681646 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9128fe08-a23a-43f9-8fc6-91eed858ba84" path="/var/lib/kubelet/pods/9128fe08-a23a-43f9-8fc6-91eed858ba84/volumes" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.701190 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.728418 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq9f5\" (UniqueName: \"kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5\") pod \"007e6c9b-000b-4516-9caf-a5c8204515eb\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.728594 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts\") pod \"007e6c9b-000b-4516-9caf-a5c8204515eb\" (UID: \"007e6c9b-000b-4516-9caf-a5c8204515eb\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.730375 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "007e6c9b-000b-4516-9caf-a5c8204515eb" (UID: "007e6c9b-000b-4516-9caf-a5c8204515eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.730846 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e6c9b-000b-4516-9caf-a5c8204515eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.734677 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5" (OuterVolumeSpecName: "kube-api-access-tq9f5") pod "007e6c9b-000b-4516-9caf-a5c8204515eb" (UID: "007e6c9b-000b-4516-9caf-a5c8204515eb"). InnerVolumeSpecName "kube-api-access-tq9f5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.832493 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts\") pod \"9787cfb5-0f46-4f88-a9e2-608370561edb\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.832576 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq8q8\" (UniqueName: \"kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8\") pod \"9787cfb5-0f46-4f88-a9e2-608370561edb\" (UID: \"9787cfb5-0f46-4f88-a9e2-608370561edb\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.832938 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9787cfb5-0f46-4f88-a9e2-608370561edb" (UID: "9787cfb5-0f46-4f88-a9e2-608370561edb"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.833577 4689 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9787cfb5-0f46-4f88-a9e2-608370561edb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.833599 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq9f5\" (UniqueName: \"kubernetes.io/projected/007e6c9b-000b-4516-9caf-a5c8204515eb-kube-api-access-tq9f5\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.837456 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8" (OuterVolumeSpecName: "kube-api-access-pq8q8") pod "9787cfb5-0f46-4f88-a9e2-608370561edb" (UID: "9787cfb5-0f46-4f88-a9e2-608370561edb"). InnerVolumeSpecName "kube-api-access-pq8q8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:14:59.936132 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq8q8\" (UniqueName: \"kubernetes.io/projected/9787cfb5-0f46-4f88-a9e2-608370561edb-kube-api-access-pq8q8\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.158093 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"] Jan 23 11:15:00 crc kubenswrapper[4689]: E0123 11:15:00.158795 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9787cfb5-0f46-4f88-a9e2-608370561edb" containerName="mariadb-account-create-update" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.158813 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="9787cfb5-0f46-4f88-a9e2-608370561edb" containerName="mariadb-account-create-update" Jan 23 11:15:00 crc kubenswrapper[4689]: E0123 11:15:00.158863 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="007e6c9b-000b-4516-9caf-a5c8204515eb" containerName="mariadb-database-create" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.158874 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="007e6c9b-000b-4516-9caf-a5c8204515eb" containerName="mariadb-database-create" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.159243 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="007e6c9b-000b-4516-9caf-a5c8204515eb" containerName="mariadb-database-create" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.159265 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="9787cfb5-0f46-4f88-a9e2-608370561edb" containerName="mariadb-account-create-update" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.160331 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.166532 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1b36-account-create-update-k4c7s" event={"ID":"9787cfb5-0f46-4f88-a9e2-608370561edb","Type":"ContainerDied","Data":"957064b4cc4c64d599cccd4e8968b9184869fc7164aa583c696b5e76c7ca9b3e"} Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.166572 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="957064b4cc4c64d599cccd4e8968b9184869fc7164aa583c696b5e76c7ca9b3e" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.166670 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1b36-account-create-update-k4c7s" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.170720 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.172523 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.179811 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-x9w8s" event={"ID":"007e6c9b-000b-4516-9caf-a5c8204515eb","Type":"ContainerDied","Data":"20848b067e725a93567e57aa3bbd3b65e44f2841c365da70b56aa8582677b8db"} Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.179850 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20848b067e725a93567e57aa3bbd3b65e44f2841c365da70b56aa8582677b8db" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.179911 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-x9w8s" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.183429 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b9b62f22-f147-40e1-a4fc-cd548ce3c065","Type":"ContainerDied","Data":"8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73"} Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.183458 4689 generic.go:334] "Generic (PLEG): container finished" podID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerID="8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" exitCode=0 Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.192987 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f00d886-838d-4678-acd0-d917f134dd59","Type":"ContainerStarted","Data":"9cc0e68aa509a4d1826192a456baae765298a4f6aca31eda1daaa2ce131c3aa6"} Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.193060 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5f00d886-838d-4678-acd0-d917f134dd59","Type":"ContainerStarted","Data":"ee3c43e9c98b3af9f0039c46ae71e2f3c06e730ab408c5f9404e7ee56bb39cdc"} Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.194294 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.237765 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"] Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.247133 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-config-volume\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.247487 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpmqp\" (UniqueName: \"kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.249092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-secret-volume\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.257529 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.25751095 podStartE2EDuration="2.25751095s" podCreationTimestamp="2026-01-23 11:14:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:00.213254537 +0000 UTC m=+1564.837934396" watchObservedRunningTime="2026-01-23 11:15:00.25751095 +0000 UTC m=+1564.882190809" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.352579 
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.352949 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-config-volume\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.353278 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpmqp\" (UniqueName: \"kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.354033 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-config-volume\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.375416 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-secret-volume\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.384485 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpmqp\" (UniqueName: \"kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp\") pod \"collect-profiles-29486115-6wbkn\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.566663 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.782974 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
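[Note] Each volume in the entries above walks the same ladder: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded (and, on teardown, UnmountVolume started, TearDown succeeded, Volume detached). A small, self-contained Go sketch for pulling that progression out of a log shaped like this one; the regular expression is an assumption tuned to the exact formatting shown here, including the literal \" escapes.

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // phaseRe captures the operation name and the quoted volume name from
    // reconciler entries like the ones above. The log text contains literal
    // backslash-quote sequences, so the pattern matches \" explicitly.
    var phaseRe = regexp.MustCompile(`operationExecutor\.(\w+) started for volume \\"([^"\\]+)\\"`)

    func main() {
        scanner := bufio.NewScanner(os.Stdin)
        scanner.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet entries can be long
        for scanner.Scan() {
            if m := phaseRe.FindStringSubmatch(scanner.Text()); m != nil {
                fmt.Printf("volume=%s phase=%s\n", m[2], m[1])
            }
        }
    }

Feeding the reflowed log through this prints one volume/phase pair per matching entry, which makes it easy to spot a volume that started an operation but never logged the succeeding step.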
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.865103 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data\") pod \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.865397 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle\") pod \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.865666 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4p4l\" (UniqueName: \"kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l\") pod \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\" (UID: \"b9b62f22-f147-40e1-a4fc-cd548ce3c065\") " Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.871052 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l" (OuterVolumeSpecName: "kube-api-access-h4p4l") pod "b9b62f22-f147-40e1-a4fc-cd548ce3c065" (UID: "b9b62f22-f147-40e1-a4fc-cd548ce3c065"). InnerVolumeSpecName "kube-api-access-h4p4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.878202 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.959859 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9b62f22-f147-40e1-a4fc-cd548ce3c065" (UID: "b9b62f22-f147-40e1-a4fc-cd548ce3c065"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.969925 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4p4l\" (UniqueName: \"kubernetes.io/projected/b9b62f22-f147-40e1-a4fc-cd548ce3c065-kube-api-access-h4p4l\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.969962 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:00 crc kubenswrapper[4689]: I0123 11:15:00.985660 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data" (OuterVolumeSpecName: "config-data") pod "b9b62f22-f147-40e1-a4fc-cd548ce3c065" (UID: "b9b62f22-f147-40e1-a4fc-cd548ce3c065"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.012907 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.100007 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data\") pod \"c762a88e-58b4-4f59-baa1-5c67e420d49d\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.100197 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle\") pod \"c762a88e-58b4-4f59-baa1-5c67e420d49d\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.100306 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs\") pod \"c762a88e-58b4-4f59-baa1-5c67e420d49d\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.100450 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9mk9\" (UniqueName: \"kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9\") pod \"c762a88e-58b4-4f59-baa1-5c67e420d49d\" (UID: \"c762a88e-58b4-4f59-baa1-5c67e420d49d\") " Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.101341 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b62f22-f147-40e1-a4fc-cd548ce3c065-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.105746 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9" (OuterVolumeSpecName: "kube-api-access-k9mk9") pod "c762a88e-58b4-4f59-baa1-5c67e420d49d" (UID: "c762a88e-58b4-4f59-baa1-5c67e420d49d"). InnerVolumeSpecName "kube-api-access-k9mk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.111704 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs" (OuterVolumeSpecName: "logs") pod "c762a88e-58b4-4f59-baa1-5c67e420d49d" (UID: "c762a88e-58b4-4f59-baa1-5c67e420d49d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.157199 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data" (OuterVolumeSpecName: "config-data") pod "c762a88e-58b4-4f59-baa1-5c67e420d49d" (UID: "c762a88e-58b4-4f59-baa1-5c67e420d49d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.165828 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c762a88e-58b4-4f59-baa1-5c67e420d49d" (UID: "c762a88e-58b4-4f59-baa1-5c67e420d49d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.205441 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"] Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.209324 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.209358 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c762a88e-58b4-4f59-baa1-5c67e420d49d-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.209373 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9mk9\" (UniqueName: \"kubernetes.io/projected/c762a88e-58b4-4f59-baa1-5c67e420d49d-kube-api-access-k9mk9\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.209387 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c762a88e-58b4-4f59-baa1-5c67e420d49d-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.223681 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerStarted","Data":"4cbdb6c2929300d0bfef471a792415010f0a5577e506ef01a20641f54243906c"} Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.231837 4689 generic.go:334] "Generic (PLEG): container finished" podID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerID="712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4" exitCode=0 Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.231930 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.231961 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerDied","Data":"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4"} Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.231990 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c762a88e-58b4-4f59-baa1-5c67e420d49d","Type":"ContainerDied","Data":"67627ce2d9f8c03b8e0f0df494119306c80a881f80067dab6a0d1cc2e7e1b263"} Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.232007 4689 scope.go:117] "RemoveContainer" containerID="712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.236702 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b9b62f22-f147-40e1-a4fc-cd548ce3c065","Type":"ContainerDied","Data":"0ef47879e4caf64fba99cbf9b56e01897090f640ba02e02e088b1c94526c5cf8"} Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.236777 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.249586 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" event={"ID":"2b302f4f-ab38-4efd-b4a7-917f5514cfd1","Type":"ContainerStarted","Data":"fac87fac819b0c315a6fe936edd8ebc232cab69d12b4940d46180f113242d055"} Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347117 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-kv9jt"] Jan 23 11:15:01 crc kubenswrapper[4689]: E0123 11:15:01.347616 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerName="nova-scheduler-scheduler" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347629 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerName="nova-scheduler-scheduler" Jan 23 11:15:01 crc kubenswrapper[4689]: E0123 11:15:01.347660 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-log" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347666 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-log" Jan 23 11:15:01 crc kubenswrapper[4689]: E0123 11:15:01.347693 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-api" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347699 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-api" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347903 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-log" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347926 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" containerName="nova-scheduler-scheduler" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.347941 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" containerName="nova-api-api" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.348751 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.353595 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.356080 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hvsrp" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.356314 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.356476 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.388385 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-kv9jt"] Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.421576 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.422194 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llglv\" (UniqueName: \"kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.422353 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.422426 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.459305 4689 scope.go:117] "RemoveContainer" containerID="20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.497353 4689 scope.go:117] "RemoveContainer" containerID="712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4" Jan 23 11:15:01 crc kubenswrapper[4689]: E0123 11:15:01.497747 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4\": container with ID starting with 712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4 not found: ID does not exist" containerID="712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.497785 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4"} err="failed to get container status 
\"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4\": rpc error: code = NotFound desc = could not find container \"712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4\": container with ID starting with 712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4 not found: ID does not exist" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.497813 4689 scope.go:117] "RemoveContainer" containerID="20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276" Jan 23 11:15:01 crc kubenswrapper[4689]: E0123 11:15:01.498120 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276\": container with ID starting with 20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276 not found: ID does not exist" containerID="20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.498167 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276"} err="failed to get container status \"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276\": rpc error: code = NotFound desc = could not find container \"20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276\": container with ID starting with 20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276 not found: ID does not exist" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.498185 4689 scope.go:117] "RemoveContainer" containerID="8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.524096 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.524239 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llglv\" (UniqueName: \"kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.524357 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.524406 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.531640 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt" 
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.533021 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.548422 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llglv\" (UniqueName: \"kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.548751 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data\") pod \"aodh-db-sync-kv9jt\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " pod="openstack/aodh-db-sync-kv9jt"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.574812 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-kv9jt"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.577714 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.605218 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.623262 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.625173 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.628159 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.665719 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9b62f22-f147-40e1-a4fc-cd548ce3c065" path="/var/lib/kubelet/pods/b9b62f22-f147-40e1-a4fc-cd548ce3c065/volumes"
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.666678 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.687391 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.708509 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.727614 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.729743 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
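[Note] The DELETE / REMOVE / ADD triple for openstack/nova-scheduler-0 above is the pod being recreated under the same name but a fresh UID (d574253a-... replacing b9b62f22-...), which is why the surrounding volume and cpu/memory-manager entries all key on UID rather than namespace/name. A minimal Go sketch of why UID-keyed bookkeeping survives such recreation; the types are illustrative, not the kubelet's.

    package main

    import "fmt"

    type podKey struct {
        Namespace, Name, UID string
    }

    func main() {
        old := podKey{"openstack", "nova-scheduler-0", "b9b62f22-f147-40e1-a4fc-cd548ce3c065"}
        repl := podKey{"openstack", "nova-scheduler-0", "d574253a-9d18-410d-8597-51eb7090584c"}

        // Keying by UID keeps the dying pod's cleanup separate from its
        // replacement even though namespace/name collide.
        state := map[string]string{old.UID: "terminating"}
        state[repl.UID] = "creating"
        fmt.Println(len(state), "distinct pods tracked") // prints: 2 distinct pods tracked
    }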
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.733102 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfqpq\" (UniqueName: \"kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.733901 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.740431 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.744056 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.757450 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.846566 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.846718 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.846830 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.846912 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfqpq\" (UniqueName: \"kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.846977 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.847052 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxdjs\" 
(UniqueName: \"kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.847184 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.861298 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.865217 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfqpq\" (UniqueName: \"kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.875109 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data\") pod \"nova-scheduler-0\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.952804 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.953016 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.953151 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.953235 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxdjs\" (UniqueName: \"kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.954568 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.959022 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.959948 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.967859 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:01 crc kubenswrapper[4689]: I0123 11:15:01.969361 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxdjs\" (UniqueName: \"kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs\") pod \"nova-api-0\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " pod="openstack/nova-api-0" Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.074095 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.229211 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-kv9jt"] Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.322868 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerStarted","Data":"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52"} Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.322926 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerStarted","Data":"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782"} Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.334445 4689 generic.go:334] "Generic (PLEG): container finished" podID="2b302f4f-ab38-4efd-b4a7-917f5514cfd1" containerID="77b144ba0f8bae66bb0cb0ab5d610d53876703b6b0daf45c926423cca7396597" exitCode=0 Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.334515 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" event={"ID":"2b302f4f-ab38-4efd-b4a7-917f5514cfd1","Type":"ContainerDied","Data":"77b144ba0f8bae66bb0cb0ab5d610d53876703b6b0daf45c926423cca7396597"} Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.363337 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.363311695 podStartE2EDuration="3.363311695s" podCreationTimestamp="2026-01-23 11:14:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:02.350525017 +0000 UTC m=+1566.975204896" watchObservedRunningTime="2026-01-23 11:15:02.363311695 +0000 UTC m=+1566.987991554" Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.553568 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:02 crc kubenswrapper[4689]: I0123 11:15:02.711756 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.357311 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerStarted","Data":"0e1a00b962d145f352b8434826c3b021bd172eb65ac3bfb8e835d9a4c3be3f81"} Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.357659 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerStarted","Data":"e49a500de9fc2c2cd688366b0f579843b7c9448dc17aea350d79c8ba00a99d12"} Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.362517 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d574253a-9d18-410d-8597-51eb7090584c","Type":"ContainerStarted","Data":"74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783"} Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.362545 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d574253a-9d18-410d-8597-51eb7090584c","Type":"ContainerStarted","Data":"ef82f1519fcf45705163b4da24135b498439aa3297229cd378b47e3313c183c4"} Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.366127 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-kv9jt" event={"ID":"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c","Type":"ContainerStarted","Data":"b64e1c0f8ccdf6ce56499814002b7eb4e554ef7eb516b9265f28b32e9ee40d97"} Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.655092 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c762a88e-58b4-4f59-baa1-5c67e420d49d" path="/var/lib/kubelet/pods/c762a88e-58b4-4f59-baa1-5c67e420d49d/volumes" Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.785132 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.808375 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.808354228 podStartE2EDuration="2.808354228s" podCreationTimestamp="2026-01-23 11:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:03.379516684 +0000 UTC m=+1568.004196543" watchObservedRunningTime="2026-01-23 11:15:03.808354228 +0000 UTC m=+1568.433034097" Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.916915 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-config-volume\") pod \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.917012 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpmqp\" (UniqueName: \"kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp\") pod \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.917202 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-secret-volume\") pod \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\" (UID: \"2b302f4f-ab38-4efd-b4a7-917f5514cfd1\") " Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.917850 4689 
Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.918935 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.922848 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2b302f4f-ab38-4efd-b4a7-917f5514cfd1" (UID: "2b302f4f-ab38-4efd-b4a7-917f5514cfd1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:15:03 crc kubenswrapper[4689]: I0123 11:15:03.939358 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp" (OuterVolumeSpecName: "kube-api-access-mpmqp") pod "2b302f4f-ab38-4efd-b4a7-917f5514cfd1" (UID: "2b302f4f-ab38-4efd-b4a7-917f5514cfd1"). InnerVolumeSpecName "kube-api-access-mpmqp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.021892 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpmqp\" (UniqueName: \"kubernetes.io/projected/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-kube-api-access-mpmqp\") on node \"crc\" DevicePath \"\""
Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.021924 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b302f4f-ab38-4efd-b4a7-917f5514cfd1-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.384644 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerStarted","Data":"ac0ab8d2436aa46a8d5c56de8b2ff5b42619e3800080cd984d3e1092416e8f47"}
Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.390770 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.392409 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn" event={"ID":"2b302f4f-ab38-4efd-b4a7-917f5514cfd1","Type":"ContainerDied","Data":"fac87fac819b0c315a6fe936edd8ebc232cab69d12b4940d46180f113242d055"} Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.392444 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fac87fac819b0c315a6fe936edd8ebc232cab69d12b4940d46180f113242d055" Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.411123 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.411098195 podStartE2EDuration="3.411098195s" podCreationTimestamp="2026-01-23 11:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:04.399214629 +0000 UTC m=+1569.023894488" watchObservedRunningTime="2026-01-23 11:15:04.411098195 +0000 UTC m=+1569.035778054" Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.538730 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:15:04 crc kubenswrapper[4689]: I0123 11:15:04.539875 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:15:06 crc kubenswrapper[4689]: I0123 11:15:06.968317 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 11:15:07 crc kubenswrapper[4689]: I0123 11:15:07.426006 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-kv9jt" event={"ID":"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c","Type":"ContainerStarted","Data":"8fb4ee08b8cded89de9a5e23c95558ff0f7464a213648efd6be67612ba917de0"} Jan 23 11:15:07 crc kubenswrapper[4689]: I0123 11:15:07.450590 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-kv9jt" podStartSLOduration=2.298391743 podStartE2EDuration="6.450571663s" podCreationTimestamp="2026-01-23 11:15:01 +0000 UTC" firstStartedPulling="2026-01-23 11:15:02.290078941 +0000 UTC m=+1566.914758800" lastFinishedPulling="2026-01-23 11:15:06.442258821 +0000 UTC m=+1571.066938720" observedRunningTime="2026-01-23 11:15:07.438556194 +0000 UTC m=+1572.063236073" watchObservedRunningTime="2026-01-23 11:15:07.450571663 +0000 UTC m=+1572.075251522" Jan 23 11:15:07 crc kubenswrapper[4689]: I0123 11:15:07.640350 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:15:07 crc kubenswrapper[4689]: E0123 11:15:07.640826 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:15:08 crc kubenswrapper[4689]: I0123 11:15:08.601418 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 23 11:15:09 crc kubenswrapper[4689]: I0123 11:15:09.470437 4689 
generic.go:334] "Generic (PLEG): container finished" podID="d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" containerID="8fb4ee08b8cded89de9a5e23c95558ff0f7464a213648efd6be67612ba917de0" exitCode=0 Jan 23 11:15:09 crc kubenswrapper[4689]: I0123 11:15:09.470538 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-kv9jt" event={"ID":"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c","Type":"ContainerDied","Data":"8fb4ee08b8cded89de9a5e23c95558ff0f7464a213648efd6be67612ba917de0"} Jan 23 11:15:09 crc kubenswrapper[4689]: I0123 11:15:09.538648 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 11:15:09 crc kubenswrapper[4689]: I0123 11:15:09.538698 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 11:15:10 crc kubenswrapper[4689]: I0123 11:15:10.554391 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:10 crc kubenswrapper[4689]: I0123 11:15:10.555250 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:10 crc kubenswrapper[4689]: I0123 11:15:10.967925 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.107365 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle\") pod \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.107506 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data\") pod \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.107692 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llglv\" (UniqueName: \"kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv\") pod \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.107734 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts\") pod \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\" (UID: \"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c\") " Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.113125 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv" (OuterVolumeSpecName: "kube-api-access-llglv") pod "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" (UID: "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c"). 
InnerVolumeSpecName "kube-api-access-llglv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.114239 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts" (OuterVolumeSpecName: "scripts") pod "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" (UID: "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.138113 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data" (OuterVolumeSpecName: "config-data") pod "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" (UID: "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.142977 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" (UID: "d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.210628 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.210843 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llglv\" (UniqueName: \"kubernetes.io/projected/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-kube-api-access-llglv\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.210917 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.210977 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.497194 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-kv9jt" event={"ID":"d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c","Type":"ContainerDied","Data":"b64e1c0f8ccdf6ce56499814002b7eb4e554ef7eb516b9265f28b32e9ee40d97"} Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.497235 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b64e1c0f8ccdf6ce56499814002b7eb4e554ef7eb516b9265f28b32e9ee40d97" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.497286 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-kv9jt" Jan 23 11:15:11 crc kubenswrapper[4689]: I0123 11:15:11.968542 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 11:15:12 crc kubenswrapper[4689]: I0123 11:15:12.029537 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 11:15:12 crc kubenswrapper[4689]: I0123 11:15:12.075346 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:15:12 crc kubenswrapper[4689]: I0123 11:15:12.075392 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:15:12 crc kubenswrapper[4689]: I0123 11:15:12.567955 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 11:15:13 crc kubenswrapper[4689]: I0123 11:15:13.159382 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.254:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:13 crc kubenswrapper[4689]: I0123 11:15:13.159414 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.254:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.765286 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 23 11:15:15 crc kubenswrapper[4689]: E0123 11:15:15.766971 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b302f4f-ab38-4efd-b4a7-917f5514cfd1" containerName="collect-profiles" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.766994 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b302f4f-ab38-4efd-b4a7-917f5514cfd1" containerName="collect-profiles" Jan 23 11:15:15 crc kubenswrapper[4689]: E0123 11:15:15.767010 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" containerName="aodh-db-sync" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.767017 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" containerName="aodh-db-sync" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.767344 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b302f4f-ab38-4efd-b4a7-917f5514cfd1" containerName="collect-profiles" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.767363 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" containerName="aodh-db-sync" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.769600 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.772277 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.772507 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hvsrp" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.773904 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.777995 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.831303 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6mqw\" (UniqueName: \"kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.831460 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.831492 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.831571 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.933978 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.934036 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.934119 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.934200 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6mqw\" (UniqueName: \"kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0" Jan 23 11:15:15 crc kubenswrapper[4689]: 
I0123 11:15:15.941131 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0"
Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.942730 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0"
Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.949522 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0"
Jan 23 11:15:15 crc kubenswrapper[4689]: I0123 11:15:15.951555 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6mqw\" (UniqueName: \"kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw\") pod \"aodh-0\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " pod="openstack/aodh-0"
Jan 23 11:15:16 crc kubenswrapper[4689]: I0123 11:15:16.089061 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 23 11:15:16 crc kubenswrapper[4689]: I0123 11:15:16.661222 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:15:17 crc kubenswrapper[4689]: I0123 11:15:17.603588 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerStarted","Data":"f3732e8edeb61790aac74cca04a7e4d8ceca7e7229aada097acf22ebfcc12c32"}
Jan 23 11:15:18 crc kubenswrapper[4689]: I0123 11:15:18.667855 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:15:19 crc kubenswrapper[4689]: I0123 11:15:19.545670 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 23 11:15:19 crc kubenswrapper[4689]: I0123 11:15:19.550596 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 23 11:15:19 crc kubenswrapper[4689]: I0123 11:15:19.551397 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 23 11:15:19 crc kubenswrapper[4689]: I0123 11:15:19.634187 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.640288 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:15:21 crc kubenswrapper[4689]: E0123 11:15:21.640684 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:15:21 crc kubenswrapper[4689]: E0123 11:15:21.758358 4689 manager.go:1116] Failed to create existing container: /kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2: Error finding container 3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2: Status 404 returned error can't find the container with id 3d075935ab026d0d26ecaa72a9face38bc78156e6a8e8607f15fde5c5268f9c2
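The pod_workers.go entry above ("back-off 5m0s restarting failed container") is the kubelet's container restart back-off at its cap. To my understanding the delay roughly doubles per consecutive crash from an initial value (commonly cited as 10s) up to the 5m ceiling; the sketch below only illustrates that shape, and the 10s base is an assumption, not something shown in this log:

package main

import (
	"fmt"
	"time"
)

// backoff returns the assumed restart delay after n consecutive failures:
// an initial delay that doubles each time, capped at 5m0s (the value the
// CrashLoopBackOff message above reports once the cap is reached).
func backoff(n int) time.Duration {
	d := 10 * time.Second // assumed initial delay
	for i := 0; i < n; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for n := 0; n <= 6; n++ {
		fmt.Printf("crash %d -> back-off %v\n", n, backoff(n))
	}
}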
Jan 23 11:15:21 crc kubenswrapper[4689]: W0123 11:15:21.825476 4689 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b302f4f_ab38_4efd_b4a7_917f5514cfd1.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b302f4f_ab38_4efd_b4a7_917f5514cfd1.slice: no such file or directory
Jan 23 11:15:21 crc kubenswrapper[4689]: W0123 11:15:21.826561 4689 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6d8e7cf_c5cc_478e_a2e6_bf7fc584d70c.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6d8e7cf_c5cc_478e_a2e6_bf7fc584d70c.slice: no such file or directory
Jan 23 11:15:21 crc kubenswrapper[4689]: E0123 11:15:21.872023 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod007e6c9b_000b_4516_9caf_a5c8204515eb.slice/crio-20848b067e725a93567e57aa3bbd3b65e44f2841c365da70b56aa8582677b8db\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9787cfb5_0f46_4f88_a9e2_608370561edb.slice/crio-957064b4cc4c64d599cccd4e8968b9184869fc7164aa583c696b5e76c7ca9b3e\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-conmon-61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod007e6c9b_000b_4516_9caf_a5c8204515eb.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice\": RecentStats: unable to find data in memory cache],
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2f42c42_b409_4a81_ae96_5b5a3b62263f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-0ef47879e4caf64fba99cbf9b56e01897090f640ba02e02e088b1c94526c5cf8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-67627ce2d9f8c03b8e0f0df494119306c80a881f80067dab6a0d1cc2e7e1b263\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-conmon-f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9787cfb5_0f46_4f88_a9e2_608370561edb.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2f42c42_b409_4a81_ae96_5b5a3b62263f.slice/crio-84efd3492830f8e6b0f0e2fe020087afda5b3a43f8cbd371a83ba26387032458\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-conmon-8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice/crio-9dbe739de7249b376c002714c81d1ccba141dc708ea171a02e7201824fc38087\": RecentStats: unable to find data in memory cache]" Jan 23 11:15:21 crc kubenswrapper[4689]: E0123 11:15:21.872217 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod120cf4c8_6188_4715_b5f3_87c3e49e359b.slice/crio-1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice/crio-conmon-ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a.scope\": RecentStats: unable to find data in memory 
cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-0ef47879e4caf64fba99cbf9b56e01897090f640ba02e02e088b1c94526c5cf8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2f42c42_b409_4a81_ae96_5b5a3b62263f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice/crio-ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-conmon-8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-conmon-f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-conmon-61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice\": RecentStats: unable to find data in memory cache]" Jan 23 11:15:21 crc kubenswrapper[4689]: E0123 11:15:21.872400 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-8c0c5d286eaabab08c31b5502e7f70d449b8ef0135d3545f984354ea86cd7d73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice/crio-ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a086a5_30c9_425a_9cc8_bfc7ff439d23.slice/crio-conmon-ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-conmon-20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-61f6b2d8a2719731f88cb2ed5deb3d5b575768404eab9d1d1607e00c69740514.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9128fe08_a23a_43f9_8fc6_91eed858ba84.slice/crio-f905f0b3a8ac81363039a7364bdf8ba464fa767772173f872c6529d1f8e6d842.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9b62f22_f147_40e1_a4fc_cd548ce3c065.slice/crio-0ef47879e4caf64fba99cbf9b56e01897090f640ba02e02e088b1c94526c5cf8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-20173bde5ec1bb41a4df3e5cfe5f95616586133c12a948227ed47580d0b9f276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2f42c42_b409_4a81_ae96_5b5a3b62263f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc762a88e_58b4_4f59_baa1_5c67e420d49d.slice/crio-712e20885e1537eab8a503458d4b75303f30058a4d32d778f9da93e646d993d4.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.947744 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.948311 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-central-agent" containerID="cri-o://0ec0bfff9fdb26b7911c040e3934a635b1dc984c7d8efe5eb340f531ab20a473" gracePeriod=30 Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.948917 4689 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="sg-core" containerID="cri-o://31f0c2b65a74ac161bbad3b5d41f4382e9cf06395fa6fb3fd13e65b70b151e59" gracePeriod=30 Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.948963 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-notification-agent" containerID="cri-o://6bdb06fe70b6db3b72df82f7be0e104e39fb0602bb0614af52d4dca89caae423" gracePeriod=30 Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.948987 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" containerID="cri-o://8e05a396643386b57b4b4ce124d29bfc25d476720883b87bf06fb5130320d428" gracePeriod=30 Jan 23 11:15:21 crc kubenswrapper[4689]: I0123 11:15:21.978412 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.082014 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.083806 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.093585 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.115935 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.297872 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.393842 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle\") pod \"120cf4c8-6188-4715-b5f3-87c3e49e359b\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.393942 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gn74\" (UniqueName: \"kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74\") pod \"120cf4c8-6188-4715-b5f3-87c3e49e359b\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.394094 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data\") pod \"120cf4c8-6188-4715-b5f3-87c3e49e359b\" (UID: \"120cf4c8-6188-4715-b5f3-87c3e49e359b\") " Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.400448 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74" (OuterVolumeSpecName: "kube-api-access-8gn74") pod "120cf4c8-6188-4715-b5f3-87c3e49e359b" (UID: "120cf4c8-6188-4715-b5f3-87c3e49e359b"). InnerVolumeSpecName "kube-api-access-8gn74". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.425014 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data" (OuterVolumeSpecName: "config-data") pod "120cf4c8-6188-4715-b5f3-87c3e49e359b" (UID: "120cf4c8-6188-4715-b5f3-87c3e49e359b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.434130 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "120cf4c8-6188-4715-b5f3-87c3e49e359b" (UID: "120cf4c8-6188-4715-b5f3-87c3e49e359b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.497981 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gn74\" (UniqueName: \"kubernetes.io/projected/120cf4c8-6188-4715-b5f3-87c3e49e359b-kube-api-access-8gn74\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.498051 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.498062 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/120cf4c8-6188-4715-b5f3-87c3e49e359b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.665406 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerStarted","Data":"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8"} Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.669598 4689 generic.go:334] "Generic (PLEG): container finished" podID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerID="31f0c2b65a74ac161bbad3b5d41f4382e9cf06395fa6fb3fd13e65b70b151e59" exitCode=2 Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.669658 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerDied","Data":"31f0c2b65a74ac161bbad3b5d41f4382e9cf06395fa6fb3fd13e65b70b151e59"} Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671466 4689 generic.go:334] "Generic (PLEG): container finished" podID="120cf4c8-6188-4715-b5f3-87c3e49e359b" containerID="3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf" exitCode=137 Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671643 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"120cf4c8-6188-4715-b5f3-87c3e49e359b","Type":"ContainerDied","Data":"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf"} Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671794 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671883 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"120cf4c8-6188-4715-b5f3-87c3e49e359b","Type":"ContainerDied","Data":"1c177560fbd636cd4fdb48213830a8f20011c3d4f803ad6ccf3a39d974198f03"} Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671834 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.671826 4689 scope.go:117] "RemoveContainer" containerID="3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.683014 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.713704 4689 scope.go:117] "RemoveContainer" containerID="3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf" Jan 23 11:15:22 crc kubenswrapper[4689]: E0123 11:15:22.714129 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf\": container with ID starting with 3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf not found: ID does not exist" containerID="3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.714175 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf"} err="failed to get container status \"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf\": rpc error: code = NotFound desc = could not find container \"3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf\": container with ID starting with 3cd9e162eae1a5fb0396b50477b6e7147ab69264c5cbca782028ff8345a3f9cf not found: ID does not exist" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.764306 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.796716 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.862204 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:15:22 crc kubenswrapper[4689]: E0123 11:15:22.862754 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="120cf4c8-6188-4715-b5f3-87c3e49e359b" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.862773 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="120cf4c8-6188-4715-b5f3-87c3e49e359b" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.863007 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="120cf4c8-6188-4715-b5f3-87c3e49e359b" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.863862 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.869691 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.869899 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.870035 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.930425 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.968870 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"] Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.971480 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:22 crc kubenswrapper[4689]: I0123 11:15:22.988867 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"] Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.014277 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.022681 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.022812 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qfqt\" (UniqueName: \"kubernetes.io/projected/71303b7c-44e9-4746-85e3-21c519e54d54-kube-api-access-8qfqt\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.023524 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.023758 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126277 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126348 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126378 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126399 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dttpm\" (UniqueName: \"kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126452 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qfqt\" (UniqueName: \"kubernetes.io/projected/71303b7c-44e9-4746-85e3-21c519e54d54-kube-api-access-8qfqt\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126491 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126582 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126638 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126675 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126721 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.126786 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.141265 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.141367 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.142244 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.144299 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71303b7c-44e9-4746-85e3-21c519e54d54-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.167703 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qfqt\" (UniqueName: \"kubernetes.io/projected/71303b7c-44e9-4746-85e3-21c519e54d54-kube-api-access-8qfqt\") pod \"nova-cell1-novncproxy-0\" (UID: \"71303b7c-44e9-4746-85e3-21c519e54d54\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.222987 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.245579 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.245848 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.246072 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.246248 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.246325 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dttpm\" (UniqueName: \"kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.246379 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.246664 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.247064 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.247621 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 
11:15:23.247937 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp"
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.248363 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp"
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.280937 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dttpm\" (UniqueName: \"kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm\") pod \"dnsmasq-dns-6d99f6bc7f-rftzp\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp"
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.308205 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp"
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.659979 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="120cf4c8-6188-4715-b5f3-87c3e49e359b" path="/var/lib/kubelet/pods/120cf4c8-6188-4715-b5f3-87c3e49e359b/volumes"
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.688635 4689 generic.go:334] "Generic (PLEG): container finished" podID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerID="8e05a396643386b57b4b4ce124d29bfc25d476720883b87bf06fb5130320d428" exitCode=0
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.688672 4689 generic.go:334] "Generic (PLEG): container finished" podID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerID="0ec0bfff9fdb26b7911c040e3934a635b1dc984c7d8efe5eb340f531ab20a473" exitCode=0
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.688759 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerDied","Data":"8e05a396643386b57b4b4ce124d29bfc25d476720883b87bf06fb5130320d428"}
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.688832 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerDied","Data":"0ec0bfff9fdb26b7911c040e3934a635b1dc984c7d8efe5eb340f531ab20a473"}
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.917708 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 23 11:15:23 crc kubenswrapper[4689]: I0123 11:15:23.948573 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"]
Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.405946 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.246:3000/\": dial tcp 10.217.0.246:3000: connect: connection refused"
Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.708752 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerStarted","Data":"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542"}
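The ceilometer proxy-httpd readiness probe above fails in two different ways: first "HTTP probe failed with statuscode: 502" while the pod is being killed, then "connect: connection refused" once nothing is listening. Kubelet HTTP probes count any status from 200 up to (but not including) 400 as success. A sketch applying the same pass criterion; the URL is the one from the log and the 2s timeout is an assumption, so running this elsewhere just reports a connection error:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// ready applies the kubelet HTTP-probe success rule: 200 <= status < 400.
func ready(url string) (bool, string) {
	client := &http.Client{Timeout: 2 * time.Second} // assumed probe timeout
	resp, err := client.Get(url)
	if err != nil {
		return false, err.Error() // e.g. "connect: connection refused"
	}
	defer resp.Body.Close()
	return resp.StatusCode >= 200 && resp.StatusCode < 400,
		fmt.Sprintf("statuscode: %d", resp.StatusCode)
}

func main() {
	ok, detail := ready("http://10.217.0.246:3000/")
	fmt.Println("ready:", ok, "-", detail)
}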
event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerStarted","Data":"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542"} Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.709000 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerStarted","Data":"601333528c3a016f20d9da27bca39be50f00ffa4dad148844febb3ab66b4c394"} Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.717303 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"71303b7c-44e9-4746-85e3-21c519e54d54","Type":"ContainerStarted","Data":"ade7e08bf826fba93476d9ada78e1a410a9d8f034211f559c2a8248f794b511c"} Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.717338 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"71303b7c-44e9-4746-85e3-21c519e54d54","Type":"ContainerStarted","Data":"1205aca24a179447e694b64ff8a6a52c256936a82585d098a6b0530da33c3fb3"} Jan 23 11:15:24 crc kubenswrapper[4689]: I0123 11:15:24.757383 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.757362857 podStartE2EDuration="2.757362857s" podCreationTimestamp="2026-01-23 11:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:24.746548787 +0000 UTC m=+1589.371228646" watchObservedRunningTime="2026-01-23 11:15:24.757362857 +0000 UTC m=+1589.382042716" Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.655057 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.730315 4689 generic.go:334] "Generic (PLEG): container finished" podID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerID="6bdb06fe70b6db3b72df82f7be0e104e39fb0602bb0614af52d4dca89caae423" exitCode=0 Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.730396 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerDied","Data":"6bdb06fe70b6db3b72df82f7be0e104e39fb0602bb0614af52d4dca89caae423"} Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.733034 4689 generic.go:334] "Generic (PLEG): container finished" podID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerID="193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542" exitCode=0 Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.733122 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerDied","Data":"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542"} Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.733805 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-api" containerID="cri-o://ac0ab8d2436aa46a8d5c56de8b2ff5b42619e3800080cd984d3e1092416e8f47" gracePeriod=30 Jan 23 11:15:25 crc kubenswrapper[4689]: I0123 11:15:25.733803 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-log" 
containerID="cri-o://0e1a00b962d145f352b8434826c3b021bd172eb65ac3bfb8e835d9a4c3be3f81" gracePeriod=30 Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.334661 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.365613 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.365670 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.365934 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9srtn\" (UniqueName: \"kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366028 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366104 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366127 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366525 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd\") pod \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\" (UID: \"caa2f2f0-0618-4f1f-97bd-8301459d3fd7\") " Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366552 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.366968 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.367219 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.367235 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.371849 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts" (OuterVolumeSpecName: "scripts") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.375227 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn" (OuterVolumeSpecName: "kube-api-access-9srtn") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "kube-api-access-9srtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.411384 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.474621 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9srtn\" (UniqueName: \"kubernetes.io/projected/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-kube-api-access-9srtn\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.474675 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.474687 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.502401 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.527005 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data" (OuterVolumeSpecName: "config-data") pod "caa2f2f0-0618-4f1f-97bd-8301459d3fd7" (UID: "caa2f2f0-0618-4f1f-97bd-8301459d3fd7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.576311 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.576348 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa2f2f0-0618-4f1f-97bd-8301459d3fd7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.768976 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerStarted","Data":"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8"} Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.771297 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerStarted","Data":"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068"} Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.771459 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.775370 4689 generic.go:334] "Generic (PLEG): container finished" podID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerID="0e1a00b962d145f352b8434826c3b021bd172eb65ac3bfb8e835d9a4c3be3f81" exitCode=143 Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.775444 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerDied","Data":"0e1a00b962d145f352b8434826c3b021bd172eb65ac3bfb8e835d9a4c3be3f81"} Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.803010 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"caa2f2f0-0618-4f1f-97bd-8301459d3fd7","Type":"ContainerDied","Data":"3bc59efe95cba59454e4783d3c06b29062bf77d692913d20159ec3fa65229996"} Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.803352 4689 scope.go:117] "RemoveContainer" containerID="8e05a396643386b57b4b4ce124d29bfc25d476720883b87bf06fb5130320d428" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.803727 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.873724 4689 scope.go:117] "RemoveContainer" containerID="31f0c2b65a74ac161bbad3b5d41f4382e9cf06395fa6fb3fd13e65b70b151e59" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.928057 4689 scope.go:117] "RemoveContainer" containerID="6bdb06fe70b6db3b72df82f7be0e104e39fb0602bb0614af52d4dca89caae423" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.935168 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" podStartSLOduration=4.935130175 podStartE2EDuration="4.935130175s" podCreationTimestamp="2026-01-23 11:15:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:26.814109271 +0000 UTC m=+1591.438789130" watchObservedRunningTime="2026-01-23 11:15:26.935130175 +0000 UTC m=+1591.559810034" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.936972 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.957056 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.965317 4689 scope.go:117] "RemoveContainer" containerID="0ec0bfff9fdb26b7911c040e3934a635b1dc984c7d8efe5eb340f531ab20a473" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.981521 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:26 crc kubenswrapper[4689]: E0123 11:15:26.982372 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="sg-core" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.982470 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="sg-core" Jan 23 11:15:26 crc kubenswrapper[4689]: E0123 11:15:26.982668 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.982869 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" Jan 23 11:15:26 crc kubenswrapper[4689]: E0123 11:15:26.983012 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-notification-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.983113 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-notification-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: E0123 11:15:26.983273 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-central-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.983380 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-central-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.983847 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-notification-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.987498 4689 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="ceilometer-central-agent" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.987694 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="sg-core" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.987788 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" containerName="proxy-httpd" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.994669 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.994801 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.997059 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:15:26 crc kubenswrapper[4689]: I0123 11:15:26.997283 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.123663 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:27 crc kubenswrapper[4689]: E0123 11:15:27.124852 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-vmxrl log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="afefceee-cd61-4bc3-8d9a-f36e1262bb93" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195095 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmxrl\" (UniqueName: \"kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195185 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195228 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195247 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195272 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc 
kubenswrapper[4689]: I0123 11:15:27.195317 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.195349 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297721 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmxrl\" (UniqueName: \"kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297786 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297815 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297835 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297866 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297911 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.297931 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.298855 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 
11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.298856 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.303298 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.303720 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.304218 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.305917 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.314981 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmxrl\" (UniqueName: \"kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl\") pod \"ceilometer-0\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.655017 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caa2f2f0-0618-4f1f-97bd-8301459d3fd7" path="/var/lib/kubelet/pods/caa2f2f0-0618-4f1f-97bd-8301459d3fd7/volumes" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.813307 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:27 crc kubenswrapper[4689]: I0123 11:15:27.829686 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.015009 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.015095 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.015211 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.015657 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.015959 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.016186 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.016242 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.016344 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmxrl\" (UniqueName: \"kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl\") pod \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\" (UID: \"afefceee-cd61-4bc3-8d9a-f36e1262bb93\") " Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.016408 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.017908 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.017937 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afefceee-cd61-4bc3-8d9a-f36e1262bb93-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.024776 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl" (OuterVolumeSpecName: "kube-api-access-vmxrl") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "kube-api-access-vmxrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.033337 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data" (OuterVolumeSpecName: "config-data") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.033390 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts" (OuterVolumeSpecName: "scripts") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.033407 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.036275 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "afefceee-cd61-4bc3-8d9a-f36e1262bb93" (UID: "afefceee-cd61-4bc3-8d9a-f36e1262bb93"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.122086 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.122132 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmxrl\" (UniqueName: \"kubernetes.io/projected/afefceee-cd61-4bc3-8d9a-f36e1262bb93-kube-api-access-vmxrl\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.122164 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.122178 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.122190 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afefceee-cd61-4bc3-8d9a-f36e1262bb93-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.223867 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.822823 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.901873 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.917885 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.936346 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.940207 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.944585 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.944619 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:15:28 crc kubenswrapper[4689]: I0123 11:15:28.976309 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050250 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050335 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-run-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050414 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050444 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqp7s\" (UniqueName: \"kubernetes.io/projected/228f582d-381b-4e5c-a71b-99f787468c36-kube-api-access-dqp7s\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050584 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050640 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.050735 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.152843 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.152944 
4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.153056 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.153094 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.153141 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-run-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.153236 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.153269 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqp7s\" (UniqueName: \"kubernetes.io/projected/228f582d-381b-4e5c-a71b-99f787468c36-kube-api-access-dqp7s\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.154110 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.154405 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-run-httpd\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.159236 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.159511 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.160387 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.161890 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.174295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqp7s\" (UniqueName: \"kubernetes.io/projected/228f582d-381b-4e5c-a71b-99f787468c36-kube-api-access-dqp7s\") pod \"ceilometer-0\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.319796 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.654290 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afefceee-cd61-4bc3-8d9a-f36e1262bb93" path="/var/lib/kubelet/pods/afefceee-cd61-4bc3-8d9a-f36e1262bb93/volumes" Jan 23 11:15:29 crc kubenswrapper[4689]: W0123 11:15:29.777070 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod228f582d_381b_4e5c_a71b_99f787468c36.slice/crio-32554ba526d6aa06bf1aa17cc212504eb59c1390a6c7979b04b6f6cfd74a9954 WatchSource:0}: Error finding container 32554ba526d6aa06bf1aa17cc212504eb59c1390a6c7979b04b6f6cfd74a9954: Status 404 returned error can't find the container with id 32554ba526d6aa06bf1aa17cc212504eb59c1390a6c7979b04b6f6cfd74a9954 Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.786276 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.836237 4689 generic.go:334] "Generic (PLEG): container finished" podID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerID="ac0ab8d2436aa46a8d5c56de8b2ff5b42619e3800080cd984d3e1092416e8f47" exitCode=0 Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.836321 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerDied","Data":"ac0ab8d2436aa46a8d5c56de8b2ff5b42619e3800080cd984d3e1092416e8f47"} Jan 23 11:15:29 crc kubenswrapper[4689]: I0123 11:15:29.838536 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerStarted","Data":"32554ba526d6aa06bf1aa17cc212504eb59c1390a6c7979b04b6f6cfd74a9954"} Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.547487 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.690210 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle\") pod \"cd667b76-f27d-4da2-b70f-36345f94fcf1\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.690757 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxdjs\" (UniqueName: \"kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs\") pod \"cd667b76-f27d-4da2-b70f-36345f94fcf1\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.690803 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs\") pod \"cd667b76-f27d-4da2-b70f-36345f94fcf1\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.690833 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data\") pod \"cd667b76-f27d-4da2-b70f-36345f94fcf1\" (UID: \"cd667b76-f27d-4da2-b70f-36345f94fcf1\") " Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.692093 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs" (OuterVolumeSpecName: "logs") pod "cd667b76-f27d-4da2-b70f-36345f94fcf1" (UID: "cd667b76-f27d-4da2-b70f-36345f94fcf1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.715390 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs" (OuterVolumeSpecName: "kube-api-access-wxdjs") pod "cd667b76-f27d-4da2-b70f-36345f94fcf1" (UID: "cd667b76-f27d-4da2-b70f-36345f94fcf1"). InnerVolumeSpecName "kube-api-access-wxdjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.730056 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd667b76-f27d-4da2-b70f-36345f94fcf1" (UID: "cd667b76-f27d-4da2-b70f-36345f94fcf1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.742682 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data" (OuterVolumeSpecName: "config-data") pod "cd667b76-f27d-4da2-b70f-36345f94fcf1" (UID: "cd667b76-f27d-4da2-b70f-36345f94fcf1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.794996 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.795024 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd667b76-f27d-4da2-b70f-36345f94fcf1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.795035 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxdjs\" (UniqueName: \"kubernetes.io/projected/cd667b76-f27d-4da2-b70f-36345f94fcf1-kube-api-access-wxdjs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.795045 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd667b76-f27d-4da2-b70f-36345f94fcf1-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.850668 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd667b76-f27d-4da2-b70f-36345f94fcf1","Type":"ContainerDied","Data":"e49a500de9fc2c2cd688366b0f579843b7c9448dc17aea350d79c8ba00a99d12"} Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.850742 4689 scope.go:117] "RemoveContainer" containerID="ac0ab8d2436aa46a8d5c56de8b2ff5b42619e3800080cd984d3e1092416e8f47" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.851435 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.888105 4689 scope.go:117] "RemoveContainer" containerID="0e1a00b962d145f352b8434826c3b021bd172eb65ac3bfb8e835d9a4c3be3f81" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.894853 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.916924 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.932533 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:30 crc kubenswrapper[4689]: E0123 11:15:30.933049 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-log" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.933066 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-log" Jan 23 11:15:30 crc kubenswrapper[4689]: E0123 11:15:30.933079 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-api" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.933085 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-api" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.933342 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" containerName="nova-api-log" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.933361 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" 
containerName="nova-api-api" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.934521 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.942841 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.943117 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.944663 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.948564 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.998784 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.999516 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6hn2\" (UniqueName: \"kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.999617 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.999813 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:30 crc kubenswrapper[4689]: I0123 11:15:30.999969 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.000200 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.102864 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.103082 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-z6hn2\" (UniqueName: \"kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.103383 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.103469 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.103564 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.103715 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.104522 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.109119 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.109508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.111027 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.118050 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.120001 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6hn2\" (UniqueName: 
\"kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2\") pod \"nova-api-0\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.262378 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.652662 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd667b76-f27d-4da2-b70f-36345f94fcf1" path="/var/lib/kubelet/pods/cd667b76-f27d-4da2-b70f-36345f94fcf1/volumes" Jan 23 11:15:31 crc kubenswrapper[4689]: W0123 11:15:31.719560 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07f1eb7d_276b_4fe1_8546_e1f72bf09aca.slice/crio-97d84b5c4c7593fca7777a3a14875f1320df8c08f375f98cca6c77ea74c352b0 WatchSource:0}: Error finding container 97d84b5c4c7593fca7777a3a14875f1320df8c08f375f98cca6c77ea74c352b0: Status 404 returned error can't find the container with id 97d84b5c4c7593fca7777a3a14875f1320df8c08f375f98cca6c77ea74c352b0 Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.721792 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:31 crc kubenswrapper[4689]: I0123 11:15:31.866044 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerStarted","Data":"97d84b5c4c7593fca7777a3a14875f1320df8c08f375f98cca6c77ea74c352b0"} Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.225113 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.246769 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.310195 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.372085 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.372388 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerName="dnsmasq-dns" containerID="cri-o://e4d243ffbd82cfab70c1b4a2d90c9394f6ee95016744e18cf204fde54906c2e1" gracePeriod=10 Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.640982 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:15:33 crc kubenswrapper[4689]: E0123 11:15:33.641675 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:15:33 crc kubenswrapper[4689]: I0123 11:15:33.907784 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 
11:15:34.119475 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-mhxmh"] Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.121097 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.123343 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.123526 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.144651 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mhxmh"] Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.190622 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.190670 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.190926 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.191275 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln9jd\" (UniqueName: \"kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.293222 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln9jd\" (UniqueName: \"kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.293416 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.293448 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") 
" pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.293529 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.300503 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.301053 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.302701 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.311677 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln9jd\" (UniqueName: \"kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd\") pod \"nova-cell1-cell-mapping-mhxmh\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:34 crc kubenswrapper[4689]: I0123 11:15:34.448691 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:35 crc kubenswrapper[4689]: W0123 11:15:35.033620 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6502814_424c_4bb5_bd7f_bd986f84c813.slice/crio-f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152 WatchSource:0}: Error finding container f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152: Status 404 returned error can't find the container with id f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152 Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.042505 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mhxmh"] Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.914606 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerStarted","Data":"b1fdadfec2dd21fe29cfe215cc7c72e14a4715875cf30528f8ce8f8ac6637028"} Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.916985 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mhxmh" event={"ID":"d6502814-424c-4bb5-bd7f-bd986f84c813","Type":"ContainerStarted","Data":"f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152"} Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.919353 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerID="e4d243ffbd82cfab70c1b4a2d90c9394f6ee95016744e18cf204fde54906c2e1" exitCode=0 Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.919399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" event={"ID":"0ba414e6-e845-4fe5-9f63-664e783ba9f0","Type":"ContainerDied","Data":"e4d243ffbd82cfab70c1b4a2d90c9394f6ee95016744e18cf204fde54906c2e1"} Jan 23 11:15:35 crc kubenswrapper[4689]: I0123 11:15:35.963013 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:15:36 crc kubenswrapper[4689]: I0123 11:15:36.941577 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mhxmh" event={"ID":"d6502814-424c-4bb5-bd7f-bd986f84c813","Type":"ContainerStarted","Data":"945a1ac05c331d59e690ce2f632d6dd87b6c746d3968c445da4f420da85210fd"} Jan 23 11:15:36 crc kubenswrapper[4689]: I0123 11:15:36.970381 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-mhxmh" podStartSLOduration=2.970366421 podStartE2EDuration="2.970366421s" podCreationTimestamp="2026-01-23 11:15:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:36.95947005 +0000 UTC m=+1601.584149919" watchObservedRunningTime="2026-01-23 11:15:36.970366421 +0000 UTC m=+1601.595046280" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.154346 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273195 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvr49\" (UniqueName: \"kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273376 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273423 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273515 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273554 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.273698 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb\") pod \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\" (UID: \"0ba414e6-e845-4fe5-9f63-664e783ba9f0\") " Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.281046 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49" (OuterVolumeSpecName: "kube-api-access-vvr49") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "kube-api-access-vvr49". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.361036 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.362566 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.364763 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.366219 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config" (OuterVolumeSpecName: "config") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.376858 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.376890 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvr49\" (UniqueName: \"kubernetes.io/projected/0ba414e6-e845-4fe5-9f63-664e783ba9f0-kube-api-access-vvr49\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.376905 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.376916 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.376926 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.381543 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0ba414e6-e845-4fe5-9f63-664e783ba9f0" (UID: "0ba414e6-e845-4fe5-9f63-664e783ba9f0"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.479406 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ba414e6-e845-4fe5-9f63-664e783ba9f0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.960357 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerStarted","Data":"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb"} Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.966903 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerStarted","Data":"e3c9a994e5155201ffbf20e8154bf8d031321ef875f6101eaafd73c163872476"} Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.971638 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.972321 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7877d89589-pf8mb" event={"ID":"0ba414e6-e845-4fe5-9f63-664e783ba9f0","Type":"ContainerDied","Data":"93ae80bcd44e00b58151b0474f2a34cb35c3e6c2708280e6cf11efcbfb7018d4"} Jan 23 11:15:37 crc kubenswrapper[4689]: I0123 11:15:37.972355 4689 scope.go:117] "RemoveContainer" containerID="e4d243ffbd82cfab70c1b4a2d90c9394f6ee95016744e18cf204fde54906c2e1" Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.002467 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=8.002442765 podStartE2EDuration="8.002442765s" podCreationTimestamp="2026-01-23 11:15:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:37.988762424 +0000 UTC m=+1602.613442283" watchObservedRunningTime="2026-01-23 11:15:38.002442765 +0000 UTC m=+1602.627122624" Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.116053 4689 scope.go:117] "RemoveContainer" containerID="d40a99b4157af27855f036cb1824f91cb317ed664f4a61c777d1fccf82f0b8fb" Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.140437 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.181616 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7877d89589-pf8mb"] Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.987097 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerStarted","Data":"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9"} Jan 23 11:15:38 crc kubenswrapper[4689]: I0123 11:15:38.990684 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerStarted","Data":"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e"} Jan 23 11:15:39 crc kubenswrapper[4689]: I0123 11:15:39.654138 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" path="/var/lib/kubelet/pods/0ba414e6-e845-4fe5-9f63-664e783ba9f0/volumes" Jan 23 11:15:41 crc kubenswrapper[4689]: 
I0123 11:15:41.012963 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerStarted","Data":"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb"} Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.013057 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-api" containerID="cri-o://4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8" gracePeriod=30 Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.013174 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-notifier" containerID="cri-o://bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e" gracePeriod=30 Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.013199 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-evaluator" containerID="cri-o://39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8" gracePeriod=30 Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.013236 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-listener" containerID="cri-o://6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb" gracePeriod=30 Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.017321 4689 generic.go:334] "Generic (PLEG): container finished" podID="d6502814-424c-4bb5-bd7f-bd986f84c813" containerID="945a1ac05c331d59e690ce2f632d6dd87b6c746d3968c445da4f420da85210fd" exitCode=0 Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.017432 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mhxmh" event={"ID":"d6502814-424c-4bb5-bd7f-bd986f84c813","Type":"ContainerDied","Data":"945a1ac05c331d59e690ce2f632d6dd87b6c746d3968c445da4f420da85210fd"} Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.023867 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerStarted","Data":"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61"} Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.044840 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.263108509 podStartE2EDuration="26.044816985s" podCreationTimestamp="2026-01-23 11:15:15 +0000 UTC" firstStartedPulling="2026-01-23 11:15:16.636965189 +0000 UTC m=+1581.261645048" lastFinishedPulling="2026-01-23 11:15:40.418673665 +0000 UTC m=+1605.043353524" observedRunningTime="2026-01-23 11:15:41.036265072 +0000 UTC m=+1605.660944931" watchObservedRunningTime="2026-01-23 11:15:41.044816985 +0000 UTC m=+1605.669496844" Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.263377 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:15:41 crc kubenswrapper[4689]: I0123 11:15:41.263438 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.039097 4689 generic.go:334] "Generic (PLEG): container finished" 
podID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerID="bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e" exitCode=0 Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.039896 4689 generic.go:334] "Generic (PLEG): container finished" podID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerID="39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8" exitCode=0 Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.039961 4689 generic.go:334] "Generic (PLEG): container finished" podID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerID="4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8" exitCode=0 Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.040211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerDied","Data":"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e"} Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.040310 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerDied","Data":"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8"} Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.040375 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerDied","Data":"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8"} Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.280466 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.4:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.280473 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.4:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.547766 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.621905 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts\") pod \"d6502814-424c-4bb5-bd7f-bd986f84c813\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.622025 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln9jd\" (UniqueName: \"kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd\") pod \"d6502814-424c-4bb5-bd7f-bd986f84c813\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.622119 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle\") pod \"d6502814-424c-4bb5-bd7f-bd986f84c813\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.622156 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data\") pod \"d6502814-424c-4bb5-bd7f-bd986f84c813\" (UID: \"d6502814-424c-4bb5-bd7f-bd986f84c813\") " Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.632438 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd" (OuterVolumeSpecName: "kube-api-access-ln9jd") pod "d6502814-424c-4bb5-bd7f-bd986f84c813" (UID: "d6502814-424c-4bb5-bd7f-bd986f84c813"). InnerVolumeSpecName "kube-api-access-ln9jd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.646273 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts" (OuterVolumeSpecName: "scripts") pod "d6502814-424c-4bb5-bd7f-bd986f84c813" (UID: "d6502814-424c-4bb5-bd7f-bd986f84c813"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.658688 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6502814-424c-4bb5-bd7f-bd986f84c813" (UID: "d6502814-424c-4bb5-bd7f-bd986f84c813"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.697297 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data" (OuterVolumeSpecName: "config-data") pod "d6502814-424c-4bb5-bd7f-bd986f84c813" (UID: "d6502814-424c-4bb5-bd7f-bd986f84c813"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.725316 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.725499 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.725617 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6502814-424c-4bb5-bd7f-bd986f84c813-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:42 crc kubenswrapper[4689]: I0123 11:15:42.725700 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln9jd\" (UniqueName: \"kubernetes.io/projected/d6502814-424c-4bb5-bd7f-bd986f84c813-kube-api-access-ln9jd\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.059046 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mhxmh" event={"ID":"d6502814-424c-4bb5-bd7f-bd986f84c813","Type":"ContainerDied","Data":"f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152"} Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.059414 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9915ad3c5f45d434659427e208edfbd550d5eb65d1740f00a0b1e56f45f2152" Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.059062 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mhxmh" Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.065018 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerStarted","Data":"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76"} Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.065188 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-central-agent" containerID="cri-o://9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.065316 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="proxy-httpd" containerID="cri-o://4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.065369 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="sg-core" containerID="cri-o://d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.065408 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-notification-agent" containerID="cri-o://8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9" gracePeriod=30 Jan 23 11:15:43 
crc kubenswrapper[4689]: I0123 11:15:43.065544 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.118065 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.085536407 podStartE2EDuration="15.118040549s" podCreationTimestamp="2026-01-23 11:15:28 +0000 UTC" firstStartedPulling="2026-01-23 11:15:29.780769934 +0000 UTC m=+1594.405449833" lastFinishedPulling="2026-01-23 11:15:42.813274116 +0000 UTC m=+1607.437953975" observedRunningTime="2026-01-23 11:15:43.095854296 +0000 UTC m=+1607.720534165" watchObservedRunningTime="2026-01-23 11:15:43.118040549 +0000 UTC m=+1607.742720448" Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.256599 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.256830 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d574253a-9d18-410d-8597-51eb7090584c" containerName="nova-scheduler-scheduler" containerID="cri-o://74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.274717 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.275110 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-log" containerID="cri-o://b1fdadfec2dd21fe29cfe215cc7c72e14a4715875cf30528f8ce8f8ac6637028" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.275323 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-api" containerID="cri-o://e3c9a994e5155201ffbf20e8154bf8d031321ef875f6101eaafd73c163872476" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.289785 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.290073 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" containerID="cri-o://5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782" gracePeriod=30 Jan 23 11:15:43 crc kubenswrapper[4689]: I0123 11:15:43.290230 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" containerID="cri-o://6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52" gracePeriod=30 Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.078117 4689 generic.go:334] "Generic (PLEG): container finished" podID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerID="b1fdadfec2dd21fe29cfe215cc7c72e14a4715875cf30528f8ce8f8ac6637028" exitCode=143 Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.078192 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerDied","Data":"b1fdadfec2dd21fe29cfe215cc7c72e14a4715875cf30528f8ce8f8ac6637028"} Jan 23 11:15:44 crc kubenswrapper[4689]: 
I0123 11:15:44.083273 4689 generic.go:334] "Generic (PLEG): container finished" podID="228f582d-381b-4e5c-a71b-99f787468c36" containerID="d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61" exitCode=2 Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.083311 4689 generic.go:334] "Generic (PLEG): container finished" podID="228f582d-381b-4e5c-a71b-99f787468c36" containerID="8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9" exitCode=0 Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.083378 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerDied","Data":"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61"} Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.083408 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerDied","Data":"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9"} Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.087663 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerID="5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782" exitCode=143 Jan 23 11:15:44 crc kubenswrapper[4689]: I0123 11:15:44.087696 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerDied","Data":"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782"} Jan 23 11:15:45 crc kubenswrapper[4689]: I0123 11:15:45.106949 4689 generic.go:334] "Generic (PLEG): container finished" podID="228f582d-381b-4e5c-a71b-99f787468c36" containerID="9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb" exitCode=0 Jan 23 11:15:45 crc kubenswrapper[4689]: I0123 11:15:45.107186 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerDied","Data":"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb"} Jan 23 11:15:46 crc kubenswrapper[4689]: I0123 11:15:46.426900 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": read tcp 10.217.0.2:43684->10.217.0.250:8775: read: connection reset by peer" Jan 23 11:15:46 crc kubenswrapper[4689]: I0123 11:15:46.426920 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": read tcp 10.217.0.2:43686->10.217.0.250:8775: read: connection reset by peer" Jan 23 11:15:46 crc kubenswrapper[4689]: E0123 11:15:46.968686 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783 is running failed: container process not found" containerID="74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:15:46 crc kubenswrapper[4689]: E0123 11:15:46.969055 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = 
container is not created or running: checking if PID of 74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783 is running failed: container process not found" containerID="74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:15:46 crc kubenswrapper[4689]: E0123 11:15:46.969297 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783 is running failed: container process not found" containerID="74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 11:15:46 crc kubenswrapper[4689]: E0123 11:15:46.969332 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d574253a-9d18-410d-8597-51eb7090584c" containerName="nova-scheduler-scheduler" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.088845 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.134688 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerID="6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52" exitCode=0 Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.134758 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerDied","Data":"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52"} Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.134798 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fec438c-a06a-48cb-a875-9e79fd50b3ca","Type":"ContainerDied","Data":"4cbdb6c2929300d0bfef471a792415010f0a5577e506ef01a20641f54243906c"} Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.134818 4689 scope.go:117] "RemoveContainer" containerID="6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.134969 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.137751 4689 generic.go:334] "Generic (PLEG): container finished" podID="d574253a-9d18-410d-8597-51eb7090584c" containerID="74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" exitCode=0 Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.137888 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d574253a-9d18-410d-8597-51eb7090584c","Type":"ContainerDied","Data":"74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783"} Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.164357 4689 scope.go:117] "RemoveContainer" containerID="5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.191604 4689 scope.go:117] "RemoveContainer" containerID="6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.192220 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52\": container with ID starting with 6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52 not found: ID does not exist" containerID="6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.192251 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52"} err="failed to get container status \"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52\": rpc error: code = NotFound desc = could not find container \"6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52\": container with ID starting with 6cd8c742e01cf42d89fac14a363082e648e5c0c1d675a065113d7f735d3b1b52 not found: ID does not exist" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.192274 4689 scope.go:117] "RemoveContainer" containerID="5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.192519 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782\": container with ID starting with 5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782 not found: ID does not exist" containerID="5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.192547 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782"} err="failed to get container status \"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782\": rpc error: code = NotFound desc = could not find container \"5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782\": container with ID starting with 5387446ddb140aff534b0bd5c2bcc28fe2888100f26a82d4e79b09a659aba782 not found: ID does not exist" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.229078 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data\") pod 
\"6fec438c-a06a-48cb-a875-9e79fd50b3ca\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.229129 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs\") pod \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.229650 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9b4t\" (UniqueName: \"kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t\") pod \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.229736 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle\") pod \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.229846 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs\") pod \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\" (UID: \"6fec438c-a06a-48cb-a875-9e79fd50b3ca\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.231343 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs" (OuterVolumeSpecName: "logs") pod "6fec438c-a06a-48cb-a875-9e79fd50b3ca" (UID: "6fec438c-a06a-48cb-a875-9e79fd50b3ca"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.234928 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t" (OuterVolumeSpecName: "kube-api-access-z9b4t") pod "6fec438c-a06a-48cb-a875-9e79fd50b3ca" (UID: "6fec438c-a06a-48cb-a875-9e79fd50b3ca"). InnerVolumeSpecName "kube-api-access-z9b4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.272458 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fec438c-a06a-48cb-a875-9e79fd50b3ca" (UID: "6fec438c-a06a-48cb-a875-9e79fd50b3ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.272490 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data" (OuterVolumeSpecName: "config-data") pod "6fec438c-a06a-48cb-a875-9e79fd50b3ca" (UID: "6fec438c-a06a-48cb-a875-9e79fd50b3ca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.308874 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "6fec438c-a06a-48cb-a875-9e79fd50b3ca" (UID: "6fec438c-a06a-48cb-a875-9e79fd50b3ca"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.333400 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9b4t\" (UniqueName: \"kubernetes.io/projected/6fec438c-a06a-48cb-a875-9e79fd50b3ca-kube-api-access-z9b4t\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.333655 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.333748 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fec438c-a06a-48cb-a875-9e79fd50b3ca-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.333818 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.333891 4689 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fec438c-a06a-48cb-a875-9e79fd50b3ca-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.381703 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.434561 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data\") pod \"d574253a-9d18-410d-8597-51eb7090584c\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.434635 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle\") pod \"d574253a-9d18-410d-8597-51eb7090584c\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.434799 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfqpq\" (UniqueName: \"kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq\") pod \"d574253a-9d18-410d-8597-51eb7090584c\" (UID: \"d574253a-9d18-410d-8597-51eb7090584c\") " Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.438887 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq" (OuterVolumeSpecName: "kube-api-access-rfqpq") pod "d574253a-9d18-410d-8597-51eb7090584c" (UID: "d574253a-9d18-410d-8597-51eb7090584c"). InnerVolumeSpecName "kube-api-access-rfqpq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.473875 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data" (OuterVolumeSpecName: "config-data") pod "d574253a-9d18-410d-8597-51eb7090584c" (UID: "d574253a-9d18-410d-8597-51eb7090584c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.474982 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d574253a-9d18-410d-8597-51eb7090584c" (UID: "d574253a-9d18-410d-8597-51eb7090584c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.509348 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.523642 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534378 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534858 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6502814-424c-4bb5-bd7f-bd986f84c813" containerName="nova-manage" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534875 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6502814-424c-4bb5-bd7f-bd986f84c813" containerName="nova-manage" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534906 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerName="init" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534912 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerName="init" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534920 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534926 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534944 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534951 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534969 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d574253a-9d18-410d-8597-51eb7090584c" containerName="nova-scheduler-scheduler" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534975 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d574253a-9d18-410d-8597-51eb7090584c" containerName="nova-scheduler-scheduler" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.534989 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" 
containerName="dnsmasq-dns" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.534994 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerName="dnsmasq-dns" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.535251 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6502814-424c-4bb5-bd7f-bd986f84c813" containerName="nova-manage" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.535267 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d574253a-9d18-410d-8597-51eb7090584c" containerName="nova-scheduler-scheduler" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.535288 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-log" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.535304 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ba414e6-e845-4fe5-9f63-664e783ba9f0" containerName="dnsmasq-dns" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.535318 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" containerName="nova-metadata-metadata" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.536511 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.538776 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.538822 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.538850 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d574253a-9d18-410d-8597-51eb7090584c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.538863 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfqpq\" (UniqueName: \"kubernetes.io/projected/d574253a-9d18-410d-8597-51eb7090584c-kube-api-access-rfqpq\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.538999 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.549902 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.640967 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:15:47 crc kubenswrapper[4689]: E0123 11:15:47.641340 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.641511 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-5qkmd\" (UniqueName: \"kubernetes.io/projected/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-kube-api-access-5qkmd\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.641577 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-config-data\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.641629 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.641777 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-logs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.641876 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.656170 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fec438c-a06a-48cb-a875-9e79fd50b3ca" path="/var/lib/kubelet/pods/6fec438c-a06a-48cb-a875-9e79fd50b3ca/volumes" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.744455 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-logs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.744531 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.744752 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qkmd\" (UniqueName: \"kubernetes.io/projected/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-kube-api-access-5qkmd\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.744854 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-config-data\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.744908 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.745969 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-logs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.748995 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.749378 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.758363 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-config-data\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.762943 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qkmd\" (UniqueName: \"kubernetes.io/projected/238926d6-2fb4-4759-9ef9-e93cca2c4bb0-kube-api-access-5qkmd\") pod \"nova-metadata-0\" (UID: \"238926d6-2fb4-4759-9ef9-e93cca2c4bb0\") " pod="openstack/nova-metadata-0" Jan 23 11:15:47 crc kubenswrapper[4689]: I0123 11:15:47.866720 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.173665 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerDied","Data":"e3c9a994e5155201ffbf20e8154bf8d031321ef875f6101eaafd73c163872476"} Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.173660 4689 generic.go:334] "Generic (PLEG): container finished" podID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerID="e3c9a994e5155201ffbf20e8154bf8d031321ef875f6101eaafd73c163872476" exitCode=0 Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.178347 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d574253a-9d18-410d-8597-51eb7090584c","Type":"ContainerDied","Data":"ef82f1519fcf45705163b4da24135b498439aa3297229cd378b47e3313c183c4"} Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.178391 4689 scope.go:117] "RemoveContainer" containerID="74009afc890eeecfd2637032dfe119bad353af9e4111fd393f606f6417aff783" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.178500 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.226063 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.227640 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.242903 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.295951 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:48 crc kubenswrapper[4689]: E0123 11:15:48.299349 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-log" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.299593 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-log" Jan 23 11:15:48 crc kubenswrapper[4689]: E0123 11:15:48.299632 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-api" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.299647 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-api" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.303575 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-api" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.303642 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" containerName="nova-api-log" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.309385 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.312326 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.317215 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.401270 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.401481 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.401513 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.401549 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.401889 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6hn2\" (UniqueName: \"kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.402007 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data\") pod \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\" (UID: \"07f1eb7d-276b-4fe1-8546-e1f72bf09aca\") " Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.402315 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs" (OuterVolumeSpecName: "logs") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.402540 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzsvn\" (UniqueName: \"kubernetes.io/projected/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-kube-api-access-dzsvn\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.402838 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.402945 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-config-data\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.403381 4689 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-logs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.406734 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2" (OuterVolumeSpecName: "kube-api-access-z6hn2") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "kube-api-access-z6hn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.433774 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.436576 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data" (OuterVolumeSpecName: "config-data") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.457436 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.477970 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "07f1eb7d-276b-4fe1-8546-e1f72bf09aca" (UID: "07f1eb7d-276b-4fe1-8546-e1f72bf09aca"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.484396 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.505610 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.505883 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-config-data\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506091 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzsvn\" (UniqueName: \"kubernetes.io/projected/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-kube-api-access-dzsvn\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506352 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506415 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506472 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506532 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.506613 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6hn2\" (UniqueName: \"kubernetes.io/projected/07f1eb7d-276b-4fe1-8546-e1f72bf09aca-kube-api-access-z6hn2\") on node \"crc\" DevicePath \"\"" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.510459 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.522716 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-config-data\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.530173 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzsvn\" (UniqueName: \"kubernetes.io/projected/6ba90fb7-d664-4eb6-90db-4ad3909ebfbf-kube-api-access-dzsvn\") pod \"nova-scheduler-0\" (UID: \"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf\") " pod="openstack/nova-scheduler-0" Jan 23 11:15:48 crc kubenswrapper[4689]: I0123 11:15:48.642575 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.141545 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.188871 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf","Type":"ContainerStarted","Data":"ee89849e62023e0c1b50d32c56a1e7bbf44e6a1fbba225cb4bee789a4152b847"} Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.190393 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"238926d6-2fb4-4759-9ef9-e93cca2c4bb0","Type":"ContainerStarted","Data":"ddd87eaca1c9f4f5b104c17c96bc902cb194528d705d4b1dd1f55ab4d5c085ec"} Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.190505 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"238926d6-2fb4-4759-9ef9-e93cca2c4bb0","Type":"ContainerStarted","Data":"b7f9f4103df7acd04dab0b88770f8f8fd96dc282f78181bf1586b59049d6035c"} Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.192838 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07f1eb7d-276b-4fe1-8546-e1f72bf09aca","Type":"ContainerDied","Data":"97d84b5c4c7593fca7777a3a14875f1320df8c08f375f98cca6c77ea74c352b0"} Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.192904 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.192989 4689 scope.go:117] "RemoveContainer" containerID="e3c9a994e5155201ffbf20e8154bf8d031321ef875f6101eaafd73c163872476" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.217559 4689 scope.go:117] "RemoveContainer" containerID="b1fdadfec2dd21fe29cfe215cc7c72e14a4715875cf30528f8ce8f8ac6637028" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.262238 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.271162 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.282708 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.285072 4689 util.go:30] "No sandbox for pod can be found. 
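The SyncLoop (PLEG) entries above carry a structured event: ID is the pod UID, Type is the lifecycle transition, and Data is the container or sandbox ID. A sketch that decodes one of the payloads printed above; the struct layout is inferred from the log output, not taken from kubelet source.

package main

import (
	"encoding/json"
	"fmt"
)

// Field layout inferred from the event={...} payloads above.
type PodLifecycleEvent struct {
	ID   string `json:"ID"`   // pod UID
	Type string `json:"Type"` // ContainerStarted, ContainerDied, ...
	Data string `json:"Data"` // container or sandbox ID
}

func main() {
	payload := `{"ID":"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf","Type":"ContainerStarted","Data":"ee89849e62023e0c1b50d32c56a1e7bbf44e6a1fbba225cb4bee789a4152b847"}`
	var ev PodLifecycleEvent
	if err := json.Unmarshal([]byte(payload), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s (%s)\n", ev.ID, ev.Type, ev.Data[:12])
}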
Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.294934 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.303363 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.303465 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.303495 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333190 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-public-tls-certs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333257 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfjql\" (UniqueName: \"kubernetes.io/projected/62f78bd0-c290-481f-9678-8acf00f77fe2-kube-api-access-cfjql\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333539 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333632 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333662 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62f78bd0-c290-481f-9678-8acf00f77fe2-logs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.333812 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-config-data\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435345 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435403 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435433 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62f78bd0-c290-481f-9678-8acf00f77fe2-logs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435504 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-config-data\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435594 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-public-tls-certs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.435659 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfjql\" (UniqueName: \"kubernetes.io/projected/62f78bd0-c290-481f-9678-8acf00f77fe2-kube-api-access-cfjql\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.436202 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62f78bd0-c290-481f-9678-8acf00f77fe2-logs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.440071 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-config-data\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.440185 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.442466 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-public-tls-certs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.442914 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62f78bd0-c290-481f-9678-8acf00f77fe2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.452352 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfjql\" (UniqueName: \"kubernetes.io/projected/62f78bd0-c290-481f-9678-8acf00f77fe2-kube-api-access-cfjql\") pod \"nova-api-0\" (UID: \"62f78bd0-c290-481f-9678-8acf00f77fe2\") " 
pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.615689 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.660506 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07f1eb7d-276b-4fe1-8546-e1f72bf09aca" path="/var/lib/kubelet/pods/07f1eb7d-276b-4fe1-8546-e1f72bf09aca/volumes" Jan 23 11:15:49 crc kubenswrapper[4689]: I0123 11:15:49.661398 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d574253a-9d18-410d-8597-51eb7090584c" path="/var/lib/kubelet/pods/d574253a-9d18-410d-8597-51eb7090584c/volumes" Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.083092 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.207123 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6ba90fb7-d664-4eb6-90db-4ad3909ebfbf","Type":"ContainerStarted","Data":"83c32a826cf5c2e89c62f73f19e58a200fc5e7ff6d0bc861c54f9a8fed469ae0"} Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.211316 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"238926d6-2fb4-4759-9ef9-e93cca2c4bb0","Type":"ContainerStarted","Data":"14be47e4ed976af35f27d9e0d22138d4d32dd208587ededb769f3f42f02136dc"} Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.213887 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62f78bd0-c290-481f-9678-8acf00f77fe2","Type":"ContainerStarted","Data":"18b20b77182fe06b620faf0a5da15e33f7423b3f4ef16c4931b47e72f5cfafc5"} Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.232617 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.232601386 podStartE2EDuration="2.232601386s" podCreationTimestamp="2026-01-23 11:15:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:50.21953369 +0000 UTC m=+1614.844213559" watchObservedRunningTime="2026-01-23 11:15:50.232601386 +0000 UTC m=+1614.857281235" Jan 23 11:15:50 crc kubenswrapper[4689]: I0123 11:15:50.247636 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.247616361 podStartE2EDuration="3.247616361s" podCreationTimestamp="2026-01-23 11:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:50.239541119 +0000 UTC m=+1614.864220978" watchObservedRunningTime="2026-01-23 11:15:50.247616361 +0000 UTC m=+1614.872296220" Jan 23 11:15:51 crc kubenswrapper[4689]: I0123 11:15:51.260165 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62f78bd0-c290-481f-9678-8acf00f77fe2","Type":"ContainerStarted","Data":"ce30dc48fa6a7ab2153e3eeb94e8b6df8f130dc278594dd37b146970b229ca77"} Jan 23 11:15:51 crc kubenswrapper[4689]: I0123 11:15:51.260930 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62f78bd0-c290-481f-9678-8acf00f77fe2","Type":"ContainerStarted","Data":"4e880fa15ba0af805f887be92872564014187336e4a25a9325f8d0f647a208ba"} Jan 23 11:15:51 crc kubenswrapper[4689]: I0123 11:15:51.290692 4689 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.290674188 podStartE2EDuration="2.290674188s" podCreationTimestamp="2026-01-23 11:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:15:51.285250383 +0000 UTC m=+1615.909930262" watchObservedRunningTime="2026-01-23 11:15:51.290674188 +0000 UTC m=+1615.915354047" Jan 23 11:15:52 crc kubenswrapper[4689]: I0123 11:15:52.868319 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:15:52 crc kubenswrapper[4689]: I0123 11:15:52.868647 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 11:15:53 crc kubenswrapper[4689]: I0123 11:15:53.651960 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 11:15:57 crc kubenswrapper[4689]: I0123 11:15:57.868098 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 11:15:57 crc kubenswrapper[4689]: I0123 11:15:57.868850 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 11:15:58 crc kubenswrapper[4689]: I0123 11:15:58.640746 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:15:58 crc kubenswrapper[4689]: E0123 11:15:58.641560 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:15:58 crc kubenswrapper[4689]: I0123 11:15:58.642933 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 11:15:58 crc kubenswrapper[4689]: I0123 11:15:58.678670 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 11:15:58 crc kubenswrapper[4689]: I0123 11:15:58.880368 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="238926d6-2fb4-4759-9ef9-e93cca2c4bb0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:58 crc kubenswrapper[4689]: I0123 11:15:58.880381 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="238926d6-2fb4-4759-9ef9-e93cca2c4bb0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:15:59 crc kubenswrapper[4689]: I0123 11:15:59.327043 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 11:15:59 crc kubenswrapper[4689]: I0123 11:15:59.417992 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 11:15:59 crc kubenswrapper[4689]: I0123 
11:15:59.616322 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:15:59 crc kubenswrapper[4689]: I0123 11:15:59.616406 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 11:16:00 crc kubenswrapper[4689]: I0123 11:16:00.663350 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="62f78bd0-c290-481f-9678-8acf00f77fe2" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:16:00 crc kubenswrapper[4689]: I0123 11:16:00.663354 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="62f78bd0-c290-481f-9678-8acf00f77fe2" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 11:16:07 crc kubenswrapper[4689]: I0123 11:16:07.875580 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 11:16:07 crc kubenswrapper[4689]: I0123 11:16:07.879863 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 11:16:07 crc kubenswrapper[4689]: I0123 11:16:07.884554 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 23 11:16:08 crc kubenswrapper[4689]: I0123 11:16:08.487099 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 23 11:16:09 crc kubenswrapper[4689]: I0123 11:16:09.753872 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 11:16:09 crc kubenswrapper[4689]: I0123 11:16:09.754771 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 11:16:09 crc kubenswrapper[4689]: I0123 11:16:09.796244 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 11:16:09 crc kubenswrapper[4689]: I0123 11:16:09.799667 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 11:16:10 crc kubenswrapper[4689]: I0123 11:16:10.516747 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 11:16:10 crc kubenswrapper[4689]: I0123 11:16:10.524069 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.501264 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.596108 4689 generic.go:334] "Generic (PLEG): container finished" podID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerID="6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb" exitCode=137 Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.597001 4689 util.go:48] "No ready sandbox for pod can be found. 
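The probe entries above show HTTPS startup probes against the pod IPs (:8775 for nova-metadata, :8774 for nova-api) timing out client-side, then flipping to started/ready roughly ten seconds later. A corev1.Probe sketch consistent with those URLs; the timing values are assumptions, since the pod spec itself is not part of this log.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Startup probe consistent with the failures above: Get "https://10.217.1.6:8775/"
// timing out client-side, then "started" on a later attempt.
func novaMetadataStartupProbe() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/",
				Port:   intstr.FromInt(8775),
				Scheme: corev1.URISchemeHTTPS,
			},
		},
		TimeoutSeconds:   1,  // assumed; matches the client-side timeout seen above
		PeriodSeconds:    10, // assumed; matches the ~10s gap between attempts
		FailureThreshold: 30, // assumed
	}
}

func main() { fmt.Printf("%+v\n", novaMetadataStartupProbe().HTTPGet) }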
Need to start a new one" pod="openstack/aodh-0" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.597468 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerDied","Data":"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb"} Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.597507 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"625db9a8-a775-48bc-923b-69b6cd0c3e7d","Type":"ContainerDied","Data":"f3732e8edeb61790aac74cca04a7e4d8ceca7e7229aada097acf22ebfcc12c32"} Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.597528 4689 scope.go:117] "RemoveContainer" containerID="6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.619388 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6mqw\" (UniqueName: \"kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw\") pod \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.619517 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle\") pod \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.619684 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data\") pod \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.619711 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts\") pod \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\" (UID: \"625db9a8-a775-48bc-923b-69b6cd0c3e7d\") " Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.627369 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts" (OuterVolumeSpecName: "scripts") pod "625db9a8-a775-48bc-923b-69b6cd0c3e7d" (UID: "625db9a8-a775-48bc-923b-69b6cd0c3e7d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.635912 4689 scope.go:117] "RemoveContainer" containerID="bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.649623 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw" (OuterVolumeSpecName: "kube-api-access-z6mqw") pod "625db9a8-a775-48bc-923b-69b6cd0c3e7d" (UID: "625db9a8-a775-48bc-923b-69b6cd0c3e7d"). InnerVolumeSpecName "kube-api-access-z6mqw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.728862 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.728892 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6mqw\" (UniqueName: \"kubernetes.io/projected/625db9a8-a775-48bc-923b-69b6cd0c3e7d-kube-api-access-z6mqw\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.769042 4689 scope.go:117] "RemoveContainer" containerID="39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.807394 4689 scope.go:117] "RemoveContainer" containerID="4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.811887 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data" (OuterVolumeSpecName: "config-data") pod "625db9a8-a775-48bc-923b-69b6cd0c3e7d" (UID: "625db9a8-a775-48bc-923b-69b6cd0c3e7d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.813278 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "625db9a8-a775-48bc-923b-69b6cd0c3e7d" (UID: "625db9a8-a775-48bc-923b-69b6cd0c3e7d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.830814 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.830842 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/625db9a8-a775-48bc-923b-69b6cd0c3e7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.833883 4689 scope.go:117] "RemoveContainer" containerID="6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb" Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.836748 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": container with ID starting with 6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb not found: ID does not exist" containerID="6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb" Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.836790 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb"} err="failed to get container status \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": rpc error: code = NotFound desc = could not find container \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": container with ID starting with 
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.836748 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": container with ID starting with 6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb not found: ID does not exist" containerID="6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.836790 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb"} err="failed to get container status \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": rpc error: code = NotFound desc = could not find container \"6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb\": container with ID starting with 6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb not found: ID does not exist"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.836815 4689 scope.go:117] "RemoveContainer" containerID="bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.837219 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e\": container with ID starting with bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e not found: ID does not exist" containerID="bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.837853 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e"} err="failed to get container status \"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e\": rpc error: code = NotFound desc = could not find container \"bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e\": container with ID starting with bb0f398b8457e54efc3dc52b085e140270515cd03788bf153920ba6b41a0a46e not found: ID does not exist"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.837869 4689 scope.go:117] "RemoveContainer" containerID="39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.838216 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8\": container with ID starting with 39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8 not found: ID does not exist" containerID="39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.838238 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8"} err="failed to get container status \"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8\": rpc error: code = NotFound desc = could not find container \"39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8\": container with ID starting with 39f55461fc295fef4c12d7d0e089f805d55465a83033b982cfcecf9bad1914b8 not found: ID does not exist"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.838256 4689 scope.go:117] "RemoveContainer" containerID="4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.838699 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8\": container with ID starting with 4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8 not found: ID does not exist" containerID="4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.838829 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8"} err="failed to get container status \"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8\": rpc error: code = NotFound desc = could not find container \"4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8\": container with ID starting with 4aeef86962ebb853cf6c74779c427a6528572f913b20916b99eaef038293e1e8 not found: ID does not exist"
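Each RemoveContainer above pairs an E line (ContainerStatus ... NotFound) with an I line where the deletor records the error and moves on: a container that is already gone is treated as removed. A sketch of that idempotent pattern using gRPC status codes; removeContainer here is a stand-in, not the actual CRI client.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Stand-in for a CRI RemoveContainer call; here it always reports NotFound,
// as the runtime does for the four container IDs above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// Deleting an already-deleted container counts as success, which is why the
// kubelet only logs the NotFound errors above and keeps going.
func ensureRemoved(id string) error {
	if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

func main() {
	fmt.Println(ensureRemoved("6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb")) // <nil>
}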
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.954696 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.972818 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.996942 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.997605 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-evaluator"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997628 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-evaluator"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.997647 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-listener"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997658 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-listener"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.997688 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-notifier"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997694 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-notifier"
Jan 23 11:16:11 crc kubenswrapper[4689]: E0123 11:16:11.997717 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-api"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997723 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-api"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997939 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-api"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997970 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-listener"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997983 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-notifier"
Jan 23 11:16:11 crc kubenswrapper[4689]: I0123 11:16:11.997992 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="625db9a8-a775-48bc-923b-69b6cd0c3e7d" containerName="aodh-evaluator"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.000495 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.004620 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hvsrp"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.004802 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.004802 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.005954 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.006199 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.013591 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.142754 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.143182 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.143222 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.143274 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.143295 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdggn\" (UniqueName: \"kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.143319 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245693 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245779 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245809 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245856 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdggn\" (UniqueName: \"kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.245893 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.250388 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.250581 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.250759 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.251328 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.257589 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.264473 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdggn\" (UniqueName: \"kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn\") pod \"aodh-0\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.318443 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.836045 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 23 11:16:12 crc kubenswrapper[4689]: I0123 11:16:12.852993 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.277986 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod625db9a8_a775_48bc_923b_69b6cd0c3e7d.slice/crio-conmon-6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod228f582d_381b_4e5c_a71b_99f787468c36.slice/crio-4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod625db9a8_a775_48bc_923b_69b6cd0c3e7d.slice/crio-f3732e8edeb61790aac74cca04a7e4d8ceca7e7229aada097acf22ebfcc12c32\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod625db9a8_a775_48bc_923b_69b6cd0c3e7d.slice/crio-6aa34ff57179e1888065bd3c7b0a076071555c6c0a255ee035da9c305e14ccfb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod625db9a8_a775_48bc_923b_69b6cd0c3e7d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod228f582d_381b_4e5c_a71b_99f787468c36.slice/crio-conmon-4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.278107 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod228f582d_381b_4e5c_a71b_99f787468c36.slice/crio-conmon-4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.617889 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.624499 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerStarted","Data":"fe96681d14f358de3fae44a2b21b2678c97a92178a55412bf972784ae03457fa"}
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.630591 4689 generic.go:334] "Generic (PLEG): container finished" podID="228f582d-381b-4e5c-a71b-99f787468c36" containerID="4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76" exitCode=137
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.630777 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerDied","Data":"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76"}
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.630862 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"228f582d-381b-4e5c-a71b-99f787468c36","Type":"ContainerDied","Data":"32554ba526d6aa06bf1aa17cc212504eb59c1390a6c7979b04b6f6cfd74a9954"}
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.630951 4689 scope.go:117] "RemoveContainer" containerID="4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76"
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.631076 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.643755 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.644023 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.683643 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd\") pod \"228f582d-381b-4e5c-a71b-99f787468c36\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.683803 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml\") pod \"228f582d-381b-4e5c-a71b-99f787468c36\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.683913 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data\") pod \"228f582d-381b-4e5c-a71b-99f787468c36\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.684246 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts\") pod \"228f582d-381b-4e5c-a71b-99f787468c36\" (UID: \"228f582d-381b-4e5c-a71b-99f787468c36\") " Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.689402 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.690056 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.691290 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/228f582d-381b-4e5c-a71b-99f787468c36-kube-api-access-dqp7s" (OuterVolumeSpecName: "kube-api-access-dqp7s") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "kube-api-access-dqp7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.696202 4689 scope.go:117] "RemoveContainer" containerID="d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.696129 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts" (OuterVolumeSpecName: "scripts") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.753966 4689 scope.go:117] "RemoveContainer" containerID="8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.790610 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.792702 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqp7s\" (UniqueName: \"kubernetes.io/projected/228f582d-381b-4e5c-a71b-99f787468c36-kube-api-access-dqp7s\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.792743 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.792753 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/228f582d-381b-4e5c-a71b-99f787468c36-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.815247 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.816593 4689 scope.go:117] "RemoveContainer" containerID="9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.835918 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.838868 4689 scope.go:117] "RemoveContainer" containerID="4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76" Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.839362 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76\": container with ID starting with 4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76 not found: ID does not exist" containerID="4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839405 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76"} err="failed to get container status \"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76\": rpc error: code = NotFound desc = could not find container \"4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76\": container with ID starting with 4c0b45017738d65b0a7859ff9923ca22c0f25a47a169a9497ee722ec7f2fca76 not found: ID does not exist" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839430 4689 scope.go:117] "RemoveContainer" containerID="d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61" Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.839700 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61\": container with ID starting with d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61 not found: ID does not exist" containerID="d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839717 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61"} err="failed to get container status \"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61\": rpc error: code = NotFound desc = could not find container \"d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61\": container with ID starting with d12566f0d0e15eb7feef64d34a1c47b55bb430fe3e523cee4f3b26429fe08f61 not found: ID does not exist" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839728 4689 scope.go:117] "RemoveContainer" containerID="8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9" Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.839920 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9\": container with ID starting with 8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9 not found: ID does not exist" containerID="8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839935 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9"} err="failed to get container status \"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9\": rpc error: code = NotFound desc = could not 
find container \"8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9\": container with ID starting with 8dfad90db9f66a6f252c6d5b2829426055b737228de8c6af65f6563fdf0090f9 not found: ID does not exist" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.839947 4689 scope.go:117] "RemoveContainer" containerID="9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb" Jan 23 11:16:13 crc kubenswrapper[4689]: E0123 11:16:13.840119 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb\": container with ID starting with 9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb not found: ID does not exist" containerID="9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.840135 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb"} err="failed to get container status \"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb\": rpc error: code = NotFound desc = could not find container \"9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb\": container with ID starting with 9f65717d776a73fc21cd47d3d89306251d4986b26433140f1b7ba89da248cebb not found: ID does not exist" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.905043 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.905083 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.915249 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data" (OuterVolumeSpecName: "config-data") pod "228f582d-381b-4e5c-a71b-99f787468c36" (UID: "228f582d-381b-4e5c-a71b-99f787468c36"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.973404 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:13 crc kubenswrapper[4689]: I0123 11:16:13.990798 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.004668 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:14 crc kubenswrapper[4689]: E0123 11:16:14.005465 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-notification-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005477 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-notification-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: E0123 11:16:14.005494 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="sg-core" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005500 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="sg-core" Jan 23 11:16:14 crc kubenswrapper[4689]: E0123 11:16:14.005535 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-central-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005542 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-central-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: E0123 11:16:14.005568 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="proxy-httpd" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005573 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="proxy-httpd" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005789 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-central-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005802 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="sg-core" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005822 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="ceilometer-notification-agent" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.005841 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="228f582d-381b-4e5c-a71b-99f787468c36" containerName="proxy-httpd" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.009953 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.010106 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228f582d-381b-4e5c-a71b-99f787468c36-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.014279 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.014392 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.041206 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112170 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112361 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112425 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112502 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112651 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112687 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.112754 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vp9k\" (UniqueName: \"kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.214991 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-7vp9k\" (UniqueName: \"kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215094 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215212 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215257 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215379 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215408 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215531 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.215980 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.220282 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.220496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.220710 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.227678 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.231053 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vp9k\" (UniqueName: \"kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k\") pod \"ceilometer-0\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.350616 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.652280 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerStarted","Data":"a6fa46dda54d4ed6635c14115c0b6b4f1a6c42f9b8add809b4c4c971dbf09770"} Jan 23 11:16:14 crc kubenswrapper[4689]: I0123 11:16:14.899951 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:15 crc kubenswrapper[4689]: I0123 11:16:15.658846 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="228f582d-381b-4e5c-a71b-99f787468c36" path="/var/lib/kubelet/pods/228f582d-381b-4e5c-a71b-99f787468c36/volumes" Jan 23 11:16:15 crc kubenswrapper[4689]: I0123 11:16:15.670526 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerStarted","Data":"fb8f34e239b8e2e87141d2a0459740b0f68b78f77665e440b49dad16ef547bf6"} Jan 23 11:16:15 crc kubenswrapper[4689]: I0123 11:16:15.672326 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerStarted","Data":"7683f2e0fcf5fded7c0a9595c05cb3ad7168ecc1729634ba6fb3f48343c9a849"} Jan 23 11:16:16 crc kubenswrapper[4689]: I0123 11:16:16.683778 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerStarted","Data":"e44c9b081b1be96d0028ac482eca3ad1922b272a7f17eae9d02f031ea67d8f10"} Jan 23 11:16:16 crc kubenswrapper[4689]: I0123 11:16:16.686643 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerStarted","Data":"b3eb9888537db550255e9740628446c590f1bf10711b85c6439a1820f8508b9a"} Jan 23 11:16:19 crc kubenswrapper[4689]: I0123 11:16:19.720401 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerStarted","Data":"dbcfe4b3631f3b06f1321ff9886d5690fa756317c085de7396feba6e6e7ca0fe"} Jan 23 11:16:19 crc kubenswrapper[4689]: I0123 11:16:19.723976 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerStarted","Data":"d7eb457d141daaffcbc25cecd6691b58c561f23f0d41bf8a66bfbe7e221f9ae4"} Jan 23 11:16:19 crc kubenswrapper[4689]: I0123 11:16:19.758380 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.069837093 podStartE2EDuration="8.758349285s" podCreationTimestamp="2026-01-23 11:16:11 +0000 UTC" firstStartedPulling="2026-01-23 11:16:12.852765126 +0000 UTC m=+1637.477444985" lastFinishedPulling="2026-01-23 11:16:18.541277318 +0000 UTC m=+1643.165957177" observedRunningTime="2026-01-23 11:16:19.744618853 +0000 UTC m=+1644.369298712" watchObservedRunningTime="2026-01-23 11:16:19.758349285 +0000 UTC m=+1644.383029144" Jan 23 11:16:20 crc kubenswrapper[4689]: I0123 11:16:20.742050 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerStarted","Data":"b5902f87c0cc24da5e2a217c5bbb5034f9f2209854c8e33f40f6890cc8946339"} Jan 23 11:16:22 crc kubenswrapper[4689]: I0123 11:16:22.782847 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerStarted","Data":"ba98e616f38592c4add977eb79cbd3f3187315d39c359a07a765aa5294a9eb3e"} Jan 23 11:16:22 crc kubenswrapper[4689]: I0123 11:16:22.784565 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:16:22 crc kubenswrapper[4689]: I0123 11:16:22.823084 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.408855333 podStartE2EDuration="9.823059059s" podCreationTimestamp="2026-01-23 11:16:13 +0000 UTC" firstStartedPulling="2026-01-23 11:16:14.898426968 +0000 UTC m=+1639.523106827" lastFinishedPulling="2026-01-23 11:16:21.312630704 +0000 UTC m=+1645.937310553" observedRunningTime="2026-01-23 11:16:22.813766268 +0000 UTC m=+1647.438446127" watchObservedRunningTime="2026-01-23 11:16:22.823059059 +0000 UTC m=+1647.447738918" Jan 23 11:16:28 crc kubenswrapper[4689]: I0123 11:16:28.640083 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:16:28 crc kubenswrapper[4689]: E0123 11:16:28.640576 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:16:40 crc kubenswrapper[4689]: I0123 11:16:40.641097 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:16:40 crc kubenswrapper[4689]: E0123 11:16:40.642465 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:16:44 crc kubenswrapper[4689]: I0123 11:16:44.359758 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 23 11:16:48 crc kubenswrapper[4689]: I0123 11:16:48.442299 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:48 crc kubenswrapper[4689]: I0123 11:16:48.443190 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" containerName="kube-state-metrics" containerID="cri-o://a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2" gracePeriod=30 Jan 23 11:16:48 crc kubenswrapper[4689]: I0123 11:16:48.650375 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:48 crc kubenswrapper[4689]: I0123 11:16:48.650987 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="d02f78f3-0cfc-48d4-8705-222943579cb0" containerName="mysqld-exporter" containerID="cri-o://2a4d3a83def808df40246fcc8f8d7ee00f26f048d72b1c4f0eeee13a76ae11b7" gracePeriod=30 Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.049538 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.123558 4689 generic.go:334] "Generic (PLEG): container finished" podID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" containerID="a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2" exitCode=2 Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.124413 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.125245 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"30b34da3-4b55-46a6-88c3-4bdfcbde2f66","Type":"ContainerDied","Data":"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2"} Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.125289 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"30b34da3-4b55-46a6-88c3-4bdfcbde2f66","Type":"ContainerDied","Data":"0716e26846fef6cd3089a5560c606797afbb6bccc91f2cb7cb5583eda8d20ead"} Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.125307 4689 scope.go:117] "RemoveContainer" containerID="a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.136281 4689 generic.go:334] "Generic (PLEG): container finished" podID="d02f78f3-0cfc-48d4-8705-222943579cb0" containerID="2a4d3a83def808df40246fcc8f8d7ee00f26f048d72b1c4f0eeee13a76ae11b7" exitCode=2 Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.136339 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"d02f78f3-0cfc-48d4-8705-222943579cb0","Type":"ContainerDied","Data":"2a4d3a83def808df40246fcc8f8d7ee00f26f048d72b1c4f0eeee13a76ae11b7"} Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.159432 4689 scope.go:117] "RemoveContainer" containerID="a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2" Jan 23 11:16:49 crc kubenswrapper[4689]: E0123 11:16:49.160301 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2\": container with ID starting with a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2 not found: ID does not exist" containerID="a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.160328 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2"} err="failed to get container status \"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2\": rpc error: code = NotFound desc = could not find container \"a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2\": container with ID starting with a05564bfb600026c4a782b616734f2dd4719c8f259bf60932a043f4def8ee1d2 not found: ID does not exist" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.175871 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qf4vt\" (UniqueName: \"kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt\") pod \"30b34da3-4b55-46a6-88c3-4bdfcbde2f66\" (UID: \"30b34da3-4b55-46a6-88c3-4bdfcbde2f66\") " Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.182303 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt" (OuterVolumeSpecName: "kube-api-access-qf4vt") pod "30b34da3-4b55-46a6-88c3-4bdfcbde2f66" (UID: "30b34da3-4b55-46a6-88c3-4bdfcbde2f66"). InnerVolumeSpecName "kube-api-access-qf4vt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.248924 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.280027 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qf4vt\" (UniqueName: \"kubernetes.io/projected/30b34da3-4b55-46a6-88c3-4bdfcbde2f66-kube-api-access-qf4vt\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.381469 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44tvv\" (UniqueName: \"kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv\") pod \"d02f78f3-0cfc-48d4-8705-222943579cb0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.381736 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle\") pod \"d02f78f3-0cfc-48d4-8705-222943579cb0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.381825 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data\") pod \"d02f78f3-0cfc-48d4-8705-222943579cb0\" (UID: \"d02f78f3-0cfc-48d4-8705-222943579cb0\") " Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.386907 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv" (OuterVolumeSpecName: "kube-api-access-44tvv") pod "d02f78f3-0cfc-48d4-8705-222943579cb0" (UID: "d02f78f3-0cfc-48d4-8705-222943579cb0"). InnerVolumeSpecName "kube-api-access-44tvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.425966 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d02f78f3-0cfc-48d4-8705-222943579cb0" (UID: "d02f78f3-0cfc-48d4-8705-222943579cb0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.459006 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data" (OuterVolumeSpecName: "config-data") pod "d02f78f3-0cfc-48d4-8705-222943579cb0" (UID: "d02f78f3-0cfc-48d4-8705-222943579cb0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.461792 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.476120 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.496156 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.496184 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f78f3-0cfc-48d4-8705-222943579cb0-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.496194 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44tvv\" (UniqueName: \"kubernetes.io/projected/d02f78f3-0cfc-48d4-8705-222943579cb0-kube-api-access-44tvv\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.505257 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:49 crc kubenswrapper[4689]: E0123 11:16:49.505691 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" containerName="kube-state-metrics" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.505703 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" containerName="kube-state-metrics" Jan 23 11:16:49 crc kubenswrapper[4689]: E0123 11:16:49.505737 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02f78f3-0cfc-48d4-8705-222943579cb0" containerName="mysqld-exporter" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.505743 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02f78f3-0cfc-48d4-8705-222943579cb0" containerName="mysqld-exporter" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.506138 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d02f78f3-0cfc-48d4-8705-222943579cb0" containerName="mysqld-exporter" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.506241 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" containerName="kube-state-metrics" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.507040 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.512459 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.512639 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.516766 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.653441 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30b34da3-4b55-46a6-88c3-4bdfcbde2f66" path="/var/lib/kubelet/pods/30b34da3-4b55-46a6-88c3-4bdfcbde2f66/volumes" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.699665 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.699759 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.699935 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24nxh\" (UniqueName: \"kubernetes.io/projected/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-api-access-24nxh\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.699982 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.802289 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24nxh\" (UniqueName: \"kubernetes.io/projected/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-api-access-24nxh\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.802609 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.802794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: 
\"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.802925 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.806974 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.807382 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.813453 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6cec49-030a-4282-a9a4-890f2783c0e5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.821566 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24nxh\" (UniqueName: \"kubernetes.io/projected/db6cec49-030a-4282-a9a4-890f2783c0e5-kube-api-access-24nxh\") pod \"kube-state-metrics-0\" (UID: \"db6cec49-030a-4282-a9a4-890f2783c0e5\") " pod="openstack/kube-state-metrics-0" Jan 23 11:16:49 crc kubenswrapper[4689]: I0123 11:16:49.965776 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.201842 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"d02f78f3-0cfc-48d4-8705-222943579cb0","Type":"ContainerDied","Data":"34e22b57eaa02df55ed3c7aedc9671be13cbe4c286dcc58a015889cc02ded57f"} Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.201906 4689 scope.go:117] "RemoveContainer" containerID="2a4d3a83def808df40246fcc8f8d7ee00f26f048d72b1c4f0eeee13a76ae11b7" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.202062 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.278713 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.310158 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.326221 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.327743 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.331358 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.331629 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.345580 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.432352 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrjdc\" (UniqueName: \"kubernetes.io/projected/451b4903-934f-44f5-8506-2fc093d6b310-kube-api-access-zrjdc\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.432500 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.432656 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-config-data\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.432729 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.535803 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrjdc\" (UniqueName: \"kubernetes.io/projected/451b4903-934f-44f5-8506-2fc093d6b310-kube-api-access-zrjdc\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.536353 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.536429 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-config-data\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.536464 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: 
\"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.542099 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-config-data\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.544803 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.559545 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrjdc\" (UniqueName: \"kubernetes.io/projected/451b4903-934f-44f5-8506-2fc093d6b310-kube-api-access-zrjdc\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.565194 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451b4903-934f-44f5-8506-2fc093d6b310-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"451b4903-934f-44f5-8506-2fc093d6b310\") " pod="openstack/mysqld-exporter-0" Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.573353 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 11:16:50 crc kubenswrapper[4689]: I0123 11:16:50.659042 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.183664 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 23 11:16:51 crc kubenswrapper[4689]: W0123 11:16:51.185346 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod451b4903_934f_44f5_8506_2fc093d6b310.slice/crio-c2d1ee9703cf49115860d3b6b4e84e25b9fac6f6b7076dbc45248465eff3deda WatchSource:0}: Error finding container c2d1ee9703cf49115860d3b6b4e84e25b9fac6f6b7076dbc45248465eff3deda: Status 404 returned error can't find the container with id c2d1ee9703cf49115860d3b6b4e84e25b9fac6f6b7076dbc45248465eff3deda Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.235826 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db6cec49-030a-4282-a9a4-890f2783c0e5","Type":"ContainerStarted","Data":"7c6024d1d94ca989e358842d0ac3f562b67ec8b1144437bb09e0cabf1ebfe4fb"} Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.235875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db6cec49-030a-4282-a9a4-890f2783c0e5","Type":"ContainerStarted","Data":"16d5f17d7b37462c9990bdf243918a161bb5077d91acb00fddf07abf44a10054"} Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.235961 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.241931 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"451b4903-934f-44f5-8506-2fc093d6b310","Type":"ContainerStarted","Data":"c2d1ee9703cf49115860d3b6b4e84e25b9fac6f6b7076dbc45248465eff3deda"} Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.260552 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.260845 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-central-agent" containerID="cri-o://e44c9b081b1be96d0028ac482eca3ad1922b272a7f17eae9d02f031ea67d8f10" gracePeriod=30 Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.260877 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="proxy-httpd" containerID="cri-o://ba98e616f38592c4add977eb79cbd3f3187315d39c359a07a765aa5294a9eb3e" gracePeriod=30 Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.260928 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="sg-core" containerID="cri-o://b5902f87c0cc24da5e2a217c5bbb5034f9f2209854c8e33f40f6890cc8946339" gracePeriod=30 Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.260931 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-notification-agent" containerID="cri-o://dbcfe4b3631f3b06f1321ff9886d5690fa756317c085de7396feba6e6e7ca0fe" gracePeriod=30 Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.268348 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/kube-state-metrics-0" podStartSLOduration=1.8589497879999999 podStartE2EDuration="2.268326048s" podCreationTimestamp="2026-01-23 11:16:49 +0000 UTC" firstStartedPulling="2026-01-23 11:16:50.58127995 +0000 UTC m=+1675.205959809" lastFinishedPulling="2026-01-23 11:16:50.99065621 +0000 UTC m=+1675.615336069" observedRunningTime="2026-01-23 11:16:51.262431532 +0000 UTC m=+1675.887111391" watchObservedRunningTime="2026-01-23 11:16:51.268326048 +0000 UTC m=+1675.893005907" Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.644532 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:16:51 crc kubenswrapper[4689]: E0123 11:16:51.645589 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:16:51 crc kubenswrapper[4689]: I0123 11:16:51.660134 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d02f78f3-0cfc-48d4-8705-222943579cb0" path="/var/lib/kubelet/pods/d02f78f3-0cfc-48d4-8705-222943579cb0/volumes" Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.254314 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"451b4903-934f-44f5-8506-2fc093d6b310","Type":"ContainerStarted","Data":"867c154b65921d16a9b6bd005c4bed08f45abf70941fea330053e12f74ec429a"} Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257832 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerID="ba98e616f38592c4add977eb79cbd3f3187315d39c359a07a765aa5294a9eb3e" exitCode=0 Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257868 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerID="b5902f87c0cc24da5e2a217c5bbb5034f9f2209854c8e33f40f6890cc8946339" exitCode=2 Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257876 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerID="e44c9b081b1be96d0028ac482eca3ad1922b272a7f17eae9d02f031ea67d8f10" exitCode=0 Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257920 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerDied","Data":"ba98e616f38592c4add977eb79cbd3f3187315d39c359a07a765aa5294a9eb3e"} Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257961 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerDied","Data":"b5902f87c0cc24da5e2a217c5bbb5034f9f2209854c8e33f40f6890cc8946339"} Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.257973 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerDied","Data":"e44c9b081b1be96d0028ac482eca3ad1922b272a7f17eae9d02f031ea67d8f10"} Jan 23 11:16:52 crc kubenswrapper[4689]: I0123 11:16:52.285725 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" 
podStartSLOduration=1.668850109 podStartE2EDuration="2.285708509s" podCreationTimestamp="2026-01-23 11:16:50 +0000 UTC" firstStartedPulling="2026-01-23 11:16:51.187874704 +0000 UTC m=+1675.812554563" lastFinishedPulling="2026-01-23 11:16:51.804733084 +0000 UTC m=+1676.429412963" observedRunningTime="2026-01-23 11:16:52.272051099 +0000 UTC m=+1676.896730968" watchObservedRunningTime="2026-01-23 11:16:52.285708509 +0000 UTC m=+1676.910388368" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.315365 4689 generic.go:334] "Generic (PLEG): container finished" podID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerID="dbcfe4b3631f3b06f1321ff9886d5690fa756317c085de7396feba6e6e7ca0fe" exitCode=0 Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.315456 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerDied","Data":"dbcfe4b3631f3b06f1321ff9886d5690fa756317c085de7396feba6e6e7ca0fe"} Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.315815 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0ea2fd87-f2b0-477b-9289-fac96dfb3f22","Type":"ContainerDied","Data":"7683f2e0fcf5fded7c0a9595c05cb3ad7168ecc1729634ba6fb3f48343c9a849"} Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.315832 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7683f2e0fcf5fded7c0a9595c05cb3ad7168ecc1729634ba6fb3f48343c9a849" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.341756 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.497620 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.497685 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.497904 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.497946 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vp9k\" (UniqueName: \"kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.498016 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.498035 4689 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.498131 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data\") pod \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\" (UID: \"0ea2fd87-f2b0-477b-9289-fac96dfb3f22\") " Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.499478 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.500346 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.504178 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k" (OuterVolumeSpecName: "kube-api-access-7vp9k") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "kube-api-access-7vp9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.504935 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts" (OuterVolumeSpecName: "scripts") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.541432 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.601505 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.601545 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.601560 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vp9k\" (UniqueName: \"kubernetes.io/projected/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-kube-api-access-7vp9k\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.601571 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.601586 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.617431 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.634514 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data" (OuterVolumeSpecName: "config-data") pod "0ea2fd87-f2b0-477b-9289-fac96dfb3f22" (UID: "0ea2fd87-f2b0-477b-9289-fac96dfb3f22"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.704854 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:56 crc kubenswrapper[4689]: I0123 11:16:56.704897 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea2fd87-f2b0-477b-9289-fac96dfb3f22-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.325334 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.366206 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.378097 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.405265 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:57 crc kubenswrapper[4689]: E0123 11:16:57.405794 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-central-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.405814 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-central-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: E0123 11:16:57.405831 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="sg-core" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.405837 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="sg-core" Jan 23 11:16:57 crc kubenswrapper[4689]: E0123 11:16:57.405860 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="proxy-httpd" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.405866 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="proxy-httpd" Jan 23 11:16:57 crc kubenswrapper[4689]: E0123 11:16:57.405892 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-notification-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.405898 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-notification-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.406174 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-central-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.406191 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="proxy-httpd" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.406203 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="ceilometer-notification-agent" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.406217 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" containerName="sg-core" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.408215 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.414099 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.414223 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.414351 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.425393 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.524857 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525000 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4wdj\" (UniqueName: \"kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525224 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525289 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525545 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525787 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525848 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.525895 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.627748 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.627827 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.627863 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.627930 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.627969 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4wdj\" (UniqueName: \"kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.628034 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.628072 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.628142 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.628330 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.628732 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.635844 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.636024 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.636021 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.636397 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.639208 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.659990 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ea2fd87-f2b0-477b-9289-fac96dfb3f22" path="/var/lib/kubelet/pods/0ea2fd87-f2b0-477b-9289-fac96dfb3f22/volumes" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.663098 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4wdj\" (UniqueName: \"kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj\") pod \"ceilometer-0\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " pod="openstack/ceilometer-0" Jan 23 11:16:57 crc kubenswrapper[4689]: I0123 11:16:57.732528 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.028898 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-bvhc5"] Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.095772 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-bvhc5"] Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.199359 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-pzpjk"] Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.201322 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.236198 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pzpjk"] Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.249523 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.336660 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerStarted","Data":"fea379c297f2f7a5bc1615b2913e6fc20cbdffcd107218dfbb8a79fe42209b9a"} Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.349048 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgqhl\" (UniqueName: \"kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.349217 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.349254 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.451477 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgqhl\" (UniqueName: \"kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.451592 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.451619 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.458752 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.459075 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.471433 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgqhl\" (UniqueName: \"kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl\") pod \"heat-db-sync-pzpjk\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") " pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:58 crc kubenswrapper[4689]: I0123 11:16:58.593671 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pzpjk" Jan 23 11:16:59 crc kubenswrapper[4689]: I0123 11:16:59.085445 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pzpjk"] Jan 23 11:16:59 crc kubenswrapper[4689]: I0123 11:16:59.353210 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerStarted","Data":"7070ea8fac3d9e20ca891446b9e9e14d807c0886ccb525ba2c569f2f00be2037"} Jan 23 11:16:59 crc kubenswrapper[4689]: I0123 11:16:59.355230 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pzpjk" event={"ID":"90127ad8-6a3a-402c-809f-d678f574fb09","Type":"ContainerStarted","Data":"0d71bdee8585ec27455a6d47fbeedfe67058e5949c8fcacd45be1a720e674803"} Jan 23 11:16:59 crc kubenswrapper[4689]: I0123 11:16:59.659540 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4" path="/var/lib/kubelet/pods/47bc8c20-f1b7-4029-aaaa-8d9d381a4ef4/volumes" Jan 23 11:16:59 crc kubenswrapper[4689]: I0123 11:16:59.995710 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 23 11:17:00 crc kubenswrapper[4689]: I0123 11:17:00.408049 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerStarted","Data":"4a9943b0d440f3acae9aea10dbab270b5fdd5515bc520ecbdc79fea970a85ad4"} Jan 23 11:17:00 crc kubenswrapper[4689]: I0123 11:17:00.642539 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:00 crc kubenswrapper[4689]: I0123 11:17:00.735568 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:01 crc kubenswrapper[4689]: I0123 11:17:01.425784 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerStarted","Data":"c56376933aad1b3dd0ef5ee9ff1c1fd628644d7845335a36eec18f3f7ea49c52"} Jan 23 11:17:01 crc kubenswrapper[4689]: I0123 11:17:01.478276 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.445839 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerStarted","Data":"b4bcf012c9d4f2a80ffba9889ffb71cb99341a7bf0e95297cfb4caf805914e54"} Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.446059 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-central-agent" 
containerID="cri-o://7070ea8fac3d9e20ca891446b9e9e14d807c0886ccb525ba2c569f2f00be2037" gracePeriod=30 Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.446376 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.446797 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="proxy-httpd" containerID="cri-o://b4bcf012c9d4f2a80ffba9889ffb71cb99341a7bf0e95297cfb4caf805914e54" gracePeriod=30 Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.446862 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="sg-core" containerID="cri-o://c56376933aad1b3dd0ef5ee9ff1c1fd628644d7845335a36eec18f3f7ea49c52" gracePeriod=30 Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.446919 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-notification-agent" containerID="cri-o://4a9943b0d440f3acae9aea10dbab270b5fdd5515bc520ecbdc79fea970a85ad4" gracePeriod=30 Jan 23 11:17:02 crc kubenswrapper[4689]: I0123 11:17:02.515011 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.90723325 podStartE2EDuration="5.514988955s" podCreationTimestamp="2026-01-23 11:16:57 +0000 UTC" firstStartedPulling="2026-01-23 11:16:58.218523209 +0000 UTC m=+1682.843203068" lastFinishedPulling="2026-01-23 11:17:01.826278914 +0000 UTC m=+1686.450958773" observedRunningTime="2026-01-23 11:17:02.493974142 +0000 UTC m=+1687.118653991" watchObservedRunningTime="2026-01-23 11:17:02.514988955 +0000 UTC m=+1687.139668814" Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496314 4689 generic.go:334] "Generic (PLEG): container finished" podID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerID="b4bcf012c9d4f2a80ffba9889ffb71cb99341a7bf0e95297cfb4caf805914e54" exitCode=0 Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496600 4689 generic.go:334] "Generic (PLEG): container finished" podID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerID="c56376933aad1b3dd0ef5ee9ff1c1fd628644d7845335a36eec18f3f7ea49c52" exitCode=2 Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496393 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerDied","Data":"b4bcf012c9d4f2a80ffba9889ffb71cb99341a7bf0e95297cfb4caf805914e54"} Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496648 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerDied","Data":"c56376933aad1b3dd0ef5ee9ff1c1fd628644d7845335a36eec18f3f7ea49c52"} Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496670 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerDied","Data":"4a9943b0d440f3acae9aea10dbab270b5fdd5515bc520ecbdc79fea970a85ad4"} Jan 23 11:17:03 crc kubenswrapper[4689]: I0123 11:17:03.496615 4689 generic.go:334] "Generic (PLEG): container finished" podID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" 
containerID="4a9943b0d440f3acae9aea10dbab270b5fdd5515bc520ecbdc79fea970a85ad4" exitCode=0 Jan 23 11:17:05 crc kubenswrapper[4689]: I0123 11:17:05.668882 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" containerID="cri-o://bfd646fa019b71a59e5091a2a0fab6b2be77c77a25c89706baf6b038b6f7b2da" gracePeriod=604796 Jan 23 11:17:06 crc kubenswrapper[4689]: I0123 11:17:06.015726 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" containerID="cri-o://4e3411d8d7429c83dfdaa7e2793aa4c0dfcd7977a791ce5fb3f4967130172f84" gracePeriod=604795 Jan 23 11:17:06 crc kubenswrapper[4689]: I0123 11:17:06.533743 4689 generic.go:334] "Generic (PLEG): container finished" podID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerID="7070ea8fac3d9e20ca891446b9e9e14d807c0886ccb525ba2c569f2f00be2037" exitCode=0 Jan 23 11:17:06 crc kubenswrapper[4689]: I0123 11:17:06.533854 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerDied","Data":"7070ea8fac3d9e20ca891446b9e9e14d807c0886ccb525ba2c569f2f00be2037"} Jan 23 11:17:06 crc kubenswrapper[4689]: I0123 11:17:06.640623 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:17:06 crc kubenswrapper[4689]: E0123 11:17:06.641042 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.460527 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.488986 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4wdj\" (UniqueName: \"kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489323 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489368 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489408 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489444 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489604 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489642 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.489694 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs\") pod \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\" (UID: \"c8abf561-73a6-4d8b-b4d6-13b211be5ba1\") " Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.490565 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.492358 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.538711 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj" (OuterVolumeSpecName: "kube-api-access-c4wdj") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "kube-api-access-c4wdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.540185 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts" (OuterVolumeSpecName: "scripts") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.547396 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.592957 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.592996 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.593008 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4wdj\" (UniqueName: \"kubernetes.io/projected/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-kube-api-access-c4wdj\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.593022 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.593035 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.606880 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8abf561-73a6-4d8b-b4d6-13b211be5ba1","Type":"ContainerDied","Data":"fea379c297f2f7a5bc1615b2913e6fc20cbdffcd107218dfbb8a79fe42209b9a"} Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.606953 4689 scope.go:117] "RemoveContainer" 
containerID="b4bcf012c9d4f2a80ffba9889ffb71cb99341a7bf0e95297cfb4caf805914e54" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.607113 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.627486 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.628289 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.665977 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data" (OuterVolumeSpecName: "config-data") pod "c8abf561-73a6-4d8b-b4d6-13b211be5ba1" (UID: "c8abf561-73a6-4d8b-b4d6-13b211be5ba1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.695284 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.695317 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.695327 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8abf561-73a6-4d8b-b4d6-13b211be5ba1-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.950491 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.975972 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.990833 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:09 crc kubenswrapper[4689]: E0123 11:17:09.991405 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="proxy-httpd" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991425 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="proxy-httpd" Jan 23 11:17:09 crc kubenswrapper[4689]: E0123 11:17:09.991457 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="sg-core" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991463 4689 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="sg-core" Jan 23 11:17:09 crc kubenswrapper[4689]: E0123 11:17:09.991473 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-central-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991479 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-central-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: E0123 11:17:09.991489 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-notification-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991495 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-notification-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991692 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="proxy-httpd" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991707 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-notification-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991719 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="ceilometer-central-agent" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.991740 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" containerName="sg-core" Jan 23 11:17:09 crc kubenswrapper[4689]: I0123 11:17:09.993813 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.005691 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.005926 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.006787 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.016029 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105328 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105713 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105734 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105762 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105849 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105892 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.105942 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.106044 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2hnz\" (UniqueName: 
\"kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.207852 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2hnz\" (UniqueName: \"kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.207958 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208006 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208024 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208049 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208113 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208193 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.208233 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.213809 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.213852 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.214332 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.215137 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.217665 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.221930 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.222016 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.225623 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2hnz\" (UniqueName: \"kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz\") pod \"ceilometer-0\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " pod="openstack/ceilometer-0" Jan 23 11:17:10 crc kubenswrapper[4689]: I0123 11:17:10.322739 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 11:17:11 crc kubenswrapper[4689]: I0123 11:17:11.251748 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused" Jan 23 11:17:11 crc kubenswrapper[4689]: I0123 11:17:11.347706 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Jan 23 11:17:11 crc kubenswrapper[4689]: I0123 11:17:11.660542 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8abf561-73a6-4d8b-b4d6-13b211be5ba1" path="/var/lib/kubelet/pods/c8abf561-73a6-4d8b-b4d6-13b211be5ba1/volumes" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.653270 4689 generic.go:334] "Generic (PLEG): container finished" podID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerID="bfd646fa019b71a59e5091a2a0fab6b2be77c77a25c89706baf6b038b6f7b2da" exitCode=0 Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.653397 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerDied","Data":"bfd646fa019b71a59e5091a2a0fab6b2be77c77a25c89706baf6b038b6f7b2da"} Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.662266 4689 generic.go:334] "Generic (PLEG): container finished" podID="126daef6-1490-45c1-898a-b51a0b069546" containerID="4e3411d8d7429c83dfdaa7e2793aa4c0dfcd7977a791ce5fb3f4967130172f84" exitCode=0 Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.662317 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerDied","Data":"4e3411d8d7429c83dfdaa7e2793aa4c0dfcd7977a791ce5fb3f4967130172f84"} Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.819198 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"] Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.822072 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.826582 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.834974 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"] Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893003 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893112 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893138 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893203 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893318 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7vvq\" (UniqueName: \"kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893364 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.893382 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996334 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" 
(UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996474 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996516 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996578 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996768 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7vvq\" (UniqueName: \"kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996890 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.996924 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.998091 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:12 crc kubenswrapper[4689]: I0123 11:17:12.998366 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.000023 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " 
pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.000296 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.000489 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.001016 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.030833 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7vvq\" (UniqueName: \"kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq\") pod \"dnsmasq-dns-594cb89c79-jfqrr\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") " pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:13 crc kubenswrapper[4689]: I0123 11:17:13.168308 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:19 crc kubenswrapper[4689]: I0123 11:17:19.977756 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085462 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085546 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085574 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085728 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085760 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085847 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085883 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.085987 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5d2r\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.086020 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.086062 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret\") pod 
\"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.086079 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins\") pod \"84c84a76-3fda-4d1e-bc46-e806b5462845\" (UID: \"84c84a76-3fda-4d1e-bc46-e806b5462845\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.091229 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.094492 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info" (OuterVolumeSpecName: "pod-info") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.095964 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.107782 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.112971 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.121910 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52" (OuterVolumeSpecName: "persistence") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.126984 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.129348 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r" (OuterVolumeSpecName: "kube-api-access-c5d2r") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "kube-api-access-c5d2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.150932 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data" (OuterVolumeSpecName: "config-data") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190084 4689 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190126 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190143 4689 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/84c84a76-3fda-4d1e-bc46-e806b5462845-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190185 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190200 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190214 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5d2r\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-kube-api-access-c5d2r\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190226 4689 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/84c84a76-3fda-4d1e-bc46-e806b5462845-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190238 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.190279 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") on node \"crc\" " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.206752 4689 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf" (OuterVolumeSpecName: "server-conf") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.257469 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "84c84a76-3fda-4d1e-bc46-e806b5462845" (UID: "84c84a76-3fda-4d1e-bc46-e806b5462845"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.260264 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.260404 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52") on node "crc" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.292522 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/84c84a76-3fda-4d1e-bc46-e806b5462845-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.292557 4689 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/84c84a76-3fda-4d1e-bc46-e806b5462845-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.292570 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.454527 4689 scope.go:117] "RemoveContainer" containerID="c56376933aad1b3dd0ef5ee9ff1c1fd628644d7845335a36eec18f3f7ea49c52" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.470440 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.470519 4689 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.470701 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgqhl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-pzpjk_openstack(90127ad8-6a3a-402c-809f-d678f574fb09): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.471932 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-pzpjk" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.518224 4689 scope.go:117] "RemoveContainer" containerID="4a9943b0d440f3acae9aea10dbab270b5fdd5515bc520ecbdc79fea970a85ad4" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.519411 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.672323 4689 scope.go:117] "RemoveContainer" containerID="7070ea8fac3d9e20ca891446b9e9e14d807c0886ccb525ba2c569f2f00be2037" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.729424 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.729625 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.729719 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mg77\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730673 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730754 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730783 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730826 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730861 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730892 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.730974 4689 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.731004 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info\") pod \"126daef6-1490-45c1-898a-b51a0b069546\" (UID: \"126daef6-1490-45c1-898a-b51a0b069546\") " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.731367 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.731954 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.732921 4689 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.732947 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.734141 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77" (OuterVolumeSpecName: "kube-api-access-8mg77") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "kube-api-access-8mg77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.739599 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.741759 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info" (OuterVolumeSpecName: "pod-info") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.742467 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.742450 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.761055 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"84c84a76-3fda-4d1e-bc46-e806b5462845","Type":"ContainerDied","Data":"788e4ffeefedad268af4095552f7c69825df0be1295bbe86d4078ec045805263"} Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.761105 4689 scope.go:117] "RemoveContainer" containerID="bfd646fa019b71a59e5091a2a0fab6b2be77c77a25c89706baf6b038b6f7b2da" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.761270 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.765516 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd" (OuterVolumeSpecName: "persistence") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.779116 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"126daef6-1490-45c1-898a-b51a0b069546","Type":"ContainerDied","Data":"6a7c95733a3dd5f7ff3648c5d895f2f023ae9fc9351bf9b304086175357e3541"} Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.779186 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.781796 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data" (OuterVolumeSpecName: "config-data") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.792527 4689 scope.go:117] "RemoveContainer" containerID="8c5b9da8c95f2018a8bac1dadf51c7270e4e896fb8da259fd6208b603520d69d" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.792547 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-pzpjk" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.847890 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mg77\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-kube-api-access-8mg77\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848261 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") on node \"crc\" " Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848279 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848291 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848304 4689 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/126daef6-1490-45c1-898a-b51a0b069546-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848313 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.848323 4689 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/126daef6-1490-45c1-898a-b51a0b069546-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.875109 4689 scope.go:117] "RemoveContainer" containerID="4e3411d8d7429c83dfdaa7e2793aa4c0dfcd7977a791ce5fb3f4967130172f84" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.904659 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf" (OuterVolumeSpecName: "server-conf") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.912737 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.936968 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.937112 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd") on node "crc" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.937163 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.950597 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.951164 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951184 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.951199 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="setup-container" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951207 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="setup-container" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.951224 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951230 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: E0123 11:17:20.951262 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="setup-container" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951269 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="setup-container" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951568 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="126daef6-1490-45c1-898a-b51a0b069546" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.951591 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" containerName="rabbitmq" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.953911 4689 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/126daef6-1490-45c1-898a-b51a0b069546-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.953942 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.958897 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.960668 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.963926 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.965422 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.965582 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.965825 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.965966 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-xbtxv" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.966143 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 23 11:17:20 crc kubenswrapper[4689]: I0123 11:17:20.970763 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.005329 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "126daef6-1490-45c1-898a-b51a0b069546" (UID: "126daef6-1490-45c1-898a-b51a0b069546"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.013231 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056261 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056335 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056360 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056424 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056460 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056795 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056842 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056886 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056939 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.056968 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-567bq\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-kube-api-access-567bq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.057006 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.057071 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/126daef6-1490-45c1-898a-b51a0b069546-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.096718 4689 scope.go:117] "RemoveContainer" containerID="d16acc7bda60fa122cc4c7a5a7c8aa08193fdfa0635393cc61f64782683992c8" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.125133 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.139140 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.154901 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.158584 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162140 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162221 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162308 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162337 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-567bq\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-kube-api-access-567bq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162377 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162404 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162446 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162460 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162519 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162549 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.162623 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.163364 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.164285 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.165881 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.171089 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.172677 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.172725 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.174482 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.175124 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.175157 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2978a247f81105abfa16be10d09657c6d2bfe07654d1f81f07d27e81227d5715/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.183879 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.190005 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.196237 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.196536 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-567bq\" (UniqueName: \"kubernetes.io/projected/5f60dabd-d5a7-417e-a9f4-2a9f06e4778d-kube-api-access-567bq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.226433 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"] Jan 23 11:17:21 crc kubenswrapper[4689]: W0123 11:17:21.226823 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9f72aa5_bcec_4d2d_904f_e08310d8ddcd.slice/crio-2e4a437e58219bd8d50f7b67f2630633e05a407df23ae0283d81eed55e2a9368 WatchSource:0}: Error finding container 2e4a437e58219bd8d50f7b67f2630633e05a407df23ae0283d81eed55e2a9368: Status 404 returned error can't find the container with id 2e4a437e58219bd8d50f7b67f2630633e05a407df23ae0283d81eed55e2a9368 Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.262409 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f312554-67b0-406d-8ff9-7e18f8de1e52\") pod \"rabbitmq-cell1-server-0\" (UID: \"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264096 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264167 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" 
(UniqueName: \"kubernetes.io/downward-api/907261fc-5550-4dd8-b645-0341b4bdd4de-pod-info\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264212 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-server-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264247 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-config-data\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264300 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264389 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-889ks\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-kube-api-access-889ks\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264428 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264883 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264913 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/907261fc-5550-4dd8-b645-0341b4bdd4de-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.264938 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.265005 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.367418 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-889ks\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-kube-api-access-889ks\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.367695 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.368171 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.369473 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.369941 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/907261fc-5550-4dd8-b645-0341b4bdd4de-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.370041 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.370252 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.370357 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.370652 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc 
kubenswrapper[4689]: I0123 11:17:21.370928 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.371074 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/907261fc-5550-4dd8-b645-0341b4bdd4de-pod-info\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.371190 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-server-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.371325 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-config-data\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.371460 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.373460 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/907261fc-5550-4dd8-b645-0341b4bdd4de-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.373791 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.374590 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.376092 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-server-conf\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.376972 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/907261fc-5550-4dd8-b645-0341b4bdd4de-config-data\") pod \"rabbitmq-server-2\" (UID: 
\"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.381928 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.381972 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1a1c51b49e7f304157ff32a4fbdeaf59458f4c680bdd3694589f5d788ea7a57f/globalmount\"" pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.383798 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/907261fc-5550-4dd8-b645-0341b4bdd4de-pod-info\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.385965 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-889ks\" (UniqueName: \"kubernetes.io/projected/907261fc-5550-4dd8-b645-0341b4bdd4de-kube-api-access-889ks\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.403914 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.443222 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4116ce4c-43da-4d75-9610-29e8ce3ab5bd\") pod \"rabbitmq-server-2\" (UID: \"907261fc-5550-4dd8-b645-0341b4bdd4de\") " pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.515578 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.644942 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:17:21 crc kubenswrapper[4689]: E0123 11:17:21.645254 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.666546 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="126daef6-1490-45c1-898a-b51a0b069546" path="/var/lib/kubelet/pods/126daef6-1490-45c1-898a-b51a0b069546/volumes" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.669884 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84c84a76-3fda-4d1e-bc46-e806b5462845" path="/var/lib/kubelet/pods/84c84a76-3fda-4d1e-bc46-e806b5462845/volumes" Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.818387 4689 generic.go:334] "Generic (PLEG): container finished" podID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerID="ff244f4796cc8efd93a4c881274e705507687239348dac01db3bf0f756a3ce65" exitCode=0 Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.818473 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" event={"ID":"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd","Type":"ContainerDied","Data":"ff244f4796cc8efd93a4c881274e705507687239348dac01db3bf0f756a3ce65"} Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.818499 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" event={"ID":"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd","Type":"ContainerStarted","Data":"2e4a437e58219bd8d50f7b67f2630633e05a407df23ae0283d81eed55e2a9368"} Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.842911 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"fe2d0802d82bbb6e0e47d06154288d4db6fda1d8822ef239cc9340555d0413ee"} Jan 23 11:17:21 crc kubenswrapper[4689]: I0123 11:17:21.952958 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 11:17:21 crc kubenswrapper[4689]: W0123 11:17:21.959504 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f60dabd_d5a7_417e_a9f4_2a9f06e4778d.slice/crio-1125c46e85cfafe73ae11f8a7e95119a4c4995eded1c1b97d60a042cbefac83c WatchSource:0}: Error finding container 1125c46e85cfafe73ae11f8a7e95119a4c4995eded1c1b97d60a042cbefac83c: Status 404 returned error can't find the container with id 1125c46e85cfafe73ae11f8a7e95119a4c4995eded1c1b97d60a042cbefac83c Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.162449 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.884331 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d","Type":"ContainerStarted","Data":"1125c46e85cfafe73ae11f8a7e95119a4c4995eded1c1b97d60a042cbefac83c"} Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.888789 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" event={"ID":"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd","Type":"ContainerStarted","Data":"fa3ba13303d547324a172573d591fbd540757eae070bba4764b2bec8576df568"} Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.888957 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.894107 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"907261fc-5550-4dd8-b645-0341b4bdd4de","Type":"ContainerStarted","Data":"b1662b1cba44e337d862ef457e4cf72dadc8425ab8476c56ad292607390c5e48"} Jan 23 11:17:22 crc kubenswrapper[4689]: I0123 11:17:22.917400 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" podStartSLOduration=10.917378768 podStartE2EDuration="10.917378768s" podCreationTimestamp="2026-01-23 11:17:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:17:22.911293956 +0000 UTC m=+1707.535973815" watchObservedRunningTime="2026-01-23 11:17:22.917378768 +0000 UTC m=+1707.542058627" Jan 23 11:17:23 crc kubenswrapper[4689]: I0123 11:17:23.909281 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d","Type":"ContainerStarted","Data":"b2084a217bf424b3e554137b9579924718e2f7908539928912fa079c4a2042d9"} Jan 23 11:17:24 crc kubenswrapper[4689]: I0123 11:17:24.924916 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"907261fc-5550-4dd8-b645-0341b4bdd4de","Type":"ContainerStarted","Data":"a6eaf0101ac6528cf5089f88344868ea01dca898a4fec0fce4ca15383e1dd55f"} Jan 23 11:17:25 crc kubenswrapper[4689]: I0123 11:17:25.937103 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef"} Jan 23 11:17:26 crc kubenswrapper[4689]: I0123 11:17:26.953954 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"6098162babea8cfd4c7f55b669eee4ce04f5786e57f9d4def7f83ed691caddfb"} Jan 23 11:17:27 crc kubenswrapper[4689]: I0123 11:17:27.966307 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"8fad4b14ea5ad828a0f6899db108864769ee46a1ca035e7b257cab386013d812"} Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.169325 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.243923 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"] Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.244132 4689 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="dnsmasq-dns" containerID="cri-o://a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068" gracePeriod=10 Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.308931 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.1:5353: connect: connection refused" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.532434 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5596c69fcc-zngbz"] Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.534586 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.579162 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5596c69fcc-zngbz"] Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673600 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-config\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673686 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-openstack-edpm-ipam\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673786 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drndf\" (UniqueName: \"kubernetes.io/projected/50ee26f5-4a1d-44e4-a32e-331b132626ff-kube-api-access-drndf\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673861 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-sb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673884 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-svc\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.673925 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-nb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 
11:17:28.673956 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-swift-storage-0\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.777254 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-swift-storage-0\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.777724 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-config\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.777777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-openstack-edpm-ipam\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.777946 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drndf\" (UniqueName: \"kubernetes.io/projected/50ee26f5-4a1d-44e4-a32e-331b132626ff-kube-api-access-drndf\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.778092 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-sb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.778122 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-svc\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.778161 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-nb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.778404 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-swift-storage-0\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.778954 
4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-nb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.779606 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-config\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.779869 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-ovsdbserver-sb\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.779974 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-dns-svc\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.780496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50ee26f5-4a1d-44e4-a32e-331b132626ff-openstack-edpm-ipam\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.809264 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drndf\" (UniqueName: \"kubernetes.io/projected/50ee26f5-4a1d-44e4-a32e-331b132626ff-kube-api-access-drndf\") pod \"dnsmasq-dns-5596c69fcc-zngbz\" (UID: \"50ee26f5-4a1d-44e4-a32e-331b132626ff\") " pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.867477 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.917204 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.989478 4689 generic.go:334] "Generic (PLEG): container finished" podID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerID="a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068" exitCode=0 Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.989624 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.989633 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerDied","Data":"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068"} Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.989681 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d99f6bc7f-rftzp" event={"ID":"b26c1980-a23a-4a11-a321-11fe4bd6b641","Type":"ContainerDied","Data":"601333528c3a016f20d9da27bca39be50f00ffa4dad148844febb3ab66b4c394"} Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.989699 4689 scope.go:117] "RemoveContainer" containerID="a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068" Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.993901 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"b8cabd2f0fdc8c5c8d7cea421616ca14c9408a9cb864048d613d4366d22650ff"} Jan 23 11:17:28 crc kubenswrapper[4689]: I0123 11:17:28.994134 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.029751 4689 scope.go:117] "RemoveContainer" containerID="193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.064885 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=13.004038899 podStartE2EDuration="20.064865017s" podCreationTimestamp="2026-01-23 11:17:09 +0000 UTC" firstStartedPulling="2026-01-23 11:17:21.031434845 +0000 UTC m=+1705.656114704" lastFinishedPulling="2026-01-23 11:17:28.092260923 +0000 UTC m=+1712.716940822" observedRunningTime="2026-01-23 11:17:29.035746551 +0000 UTC m=+1713.660426410" watchObservedRunningTime="2026-01-23 11:17:29.064865017 +0000 UTC m=+1713.689544876" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.077092 4689 scope.go:117] "RemoveContainer" containerID="a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068" Jan 23 11:17:29 crc kubenswrapper[4689]: E0123 11:17:29.084016 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068\": container with ID starting with a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068 not found: ID does not exist" containerID="a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.084072 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068"} err="failed to get container status \"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068\": rpc error: code = NotFound desc = could not find container \"a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068\": container with ID starting with a715211e2ce0d3a6f0cab53a39bb42e3b02888930b3b542b5a3b2007b854c068 not found: ID does not exist" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.084102 4689 scope.go:117] "RemoveContainer" containerID="193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542" Jan 23 
11:17:29 crc kubenswrapper[4689]: E0123 11:17:29.090538 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542\": container with ID starting with 193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542 not found: ID does not exist" containerID="193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.090585 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542"} err="failed to get container status \"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542\": rpc error: code = NotFound desc = could not find container \"193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542\": container with ID starting with 193c39543525ad9a7ad2e9dde2d201160422520a548e48615df9b0d320439542 not found: ID does not exist" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095218 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095294 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095322 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095400 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dttpm\" (UniqueName: \"kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095438 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.095457 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config\") pod \"b26c1980-a23a-4a11-a321-11fe4bd6b641\" (UID: \"b26c1980-a23a-4a11-a321-11fe4bd6b641\") " Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.118478 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm" (OuterVolumeSpecName: "kube-api-access-dttpm") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). 
InnerVolumeSpecName "kube-api-access-dttpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.170938 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.184493 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.200290 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.200323 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dttpm\" (UniqueName: \"kubernetes.io/projected/b26c1980-a23a-4a11-a321-11fe4bd6b641-kube-api-access-dttpm\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.200334 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.214206 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.214761 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config" (OuterVolumeSpecName: "config") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.226835 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b26c1980-a23a-4a11-a321-11fe4bd6b641" (UID: "b26c1980-a23a-4a11-a321-11fe4bd6b641"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.302613 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.302653 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-config\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.302663 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b26c1980-a23a-4a11-a321-11fe4bd6b641-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.376187 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"] Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.387866 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d99f6bc7f-rftzp"] Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.599450 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5596c69fcc-zngbz"] Jan 23 11:17:29 crc kubenswrapper[4689]: I0123 11:17:29.663867 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" path="/var/lib/kubelet/pods/b26c1980-a23a-4a11-a321-11fe4bd6b641/volumes" Jan 23 11:17:30 crc kubenswrapper[4689]: I0123 11:17:30.013973 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" event={"ID":"50ee26f5-4a1d-44e4-a32e-331b132626ff","Type":"ContainerStarted","Data":"8d7fc5d357ca7071dacc602ec97918194fc2f65d34f7dae9f2a0c6f893551470"} Jan 23 11:17:30 crc kubenswrapper[4689]: I0123 11:17:30.014027 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" event={"ID":"50ee26f5-4a1d-44e4-a32e-331b132626ff","Type":"ContainerStarted","Data":"74a239c69247fb65fd53bb5bb981a02ae443d2525320e7b739e58e97a7a57049"} Jan 23 11:17:31 crc kubenswrapper[4689]: I0123 11:17:31.030310 4689 generic.go:334] "Generic (PLEG): container finished" podID="50ee26f5-4a1d-44e4-a32e-331b132626ff" containerID="8d7fc5d357ca7071dacc602ec97918194fc2f65d34f7dae9f2a0c6f893551470" exitCode=0 Jan 23 11:17:31 crc kubenswrapper[4689]: I0123 11:17:31.030420 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" event={"ID":"50ee26f5-4a1d-44e4-a32e-331b132626ff","Type":"ContainerDied","Data":"8d7fc5d357ca7071dacc602ec97918194fc2f65d34f7dae9f2a0c6f893551470"} Jan 23 11:17:31 crc kubenswrapper[4689]: I0123 11:17:31.032000 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" event={"ID":"50ee26f5-4a1d-44e4-a32e-331b132626ff","Type":"ContainerStarted","Data":"6a43a6aecbf35c1eb857160dec2f407d0b0d170d208095344b78a8d29780ad67"} Jan 23 11:17:31 crc kubenswrapper[4689]: I0123 11:17:31.032033 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" Jan 23 11:17:32 crc kubenswrapper[4689]: I0123 11:17:32.658784 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz" podStartSLOduration=4.658762407 podStartE2EDuration="4.658762407s" 
podCreationTimestamp="2026-01-23 11:17:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:17:31.052294979 +0000 UTC m=+1715.676974838" watchObservedRunningTime="2026-01-23 11:17:32.658762407 +0000 UTC m=+1717.283442276" Jan 23 11:17:35 crc kubenswrapper[4689]: I0123 11:17:35.078828 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pzpjk" event={"ID":"90127ad8-6a3a-402c-809f-d678f574fb09","Type":"ContainerStarted","Data":"1c8f7b9502409880289e9b7dcca2477d7d5efda1d5de926f2f8394e0fc5baace"} Jan 23 11:17:35 crc kubenswrapper[4689]: I0123 11:17:35.117522 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-pzpjk" podStartSLOduration=2.311739066 podStartE2EDuration="37.117500262s" podCreationTimestamp="2026-01-23 11:16:58 +0000 UTC" firstStartedPulling="2026-01-23 11:16:59.090111817 +0000 UTC m=+1683.714791676" lastFinishedPulling="2026-01-23 11:17:33.895873013 +0000 UTC m=+1718.520552872" observedRunningTime="2026-01-23 11:17:35.101928754 +0000 UTC m=+1719.726608643" watchObservedRunningTime="2026-01-23 11:17:35.117500262 +0000 UTC m=+1719.742180131" Jan 23 11:17:36 crc kubenswrapper[4689]: I0123 11:17:36.640505 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:17:36 crc kubenswrapper[4689]: E0123 11:17:36.641548 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:17:37 crc kubenswrapper[4689]: I0123 11:17:37.104689 4689 generic.go:334] "Generic (PLEG): container finished" podID="90127ad8-6a3a-402c-809f-d678f574fb09" containerID="1c8f7b9502409880289e9b7dcca2477d7d5efda1d5de926f2f8394e0fc5baace" exitCode=0 Jan 23 11:17:37 crc kubenswrapper[4689]: I0123 11:17:37.104769 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pzpjk" event={"ID":"90127ad8-6a3a-402c-809f-d678f574fb09","Type":"ContainerDied","Data":"1c8f7b9502409880289e9b7dcca2477d7d5efda1d5de926f2f8394e0fc5baace"} Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.599179 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-pzpjk"
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.683142 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data\") pod \"90127ad8-6a3a-402c-809f-d678f574fb09\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") "
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.685413 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgqhl\" (UniqueName: \"kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl\") pod \"90127ad8-6a3a-402c-809f-d678f574fb09\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") "
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.685704 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle\") pod \"90127ad8-6a3a-402c-809f-d678f574fb09\" (UID: \"90127ad8-6a3a-402c-809f-d678f574fb09\") "
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.708928 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl" (OuterVolumeSpecName: "kube-api-access-sgqhl") pod "90127ad8-6a3a-402c-809f-d678f574fb09" (UID: "90127ad8-6a3a-402c-809f-d678f574fb09"). InnerVolumeSpecName "kube-api-access-sgqhl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.789873 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgqhl\" (UniqueName: \"kubernetes.io/projected/90127ad8-6a3a-402c-809f-d678f574fb09-kube-api-access-sgqhl\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.816371 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90127ad8-6a3a-402c-809f-d678f574fb09" (UID: "90127ad8-6a3a-402c-809f-d678f574fb09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.870483 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5596c69fcc-zngbz"
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.893402 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.934160 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data" (OuterVolumeSpecName: "config-data") pod "90127ad8-6a3a-402c-809f-d678f574fb09" (UID: "90127ad8-6a3a-402c-809f-d678f574fb09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.949495 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"]
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.949735 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="dnsmasq-dns" containerID="cri-o://fa3ba13303d547324a172573d591fbd540757eae070bba4764b2bec8576df568" gracePeriod=10
Jan 23 11:17:38 crc kubenswrapper[4689]: I0123 11:17:38.996087 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90127ad8-6a3a-402c-809f-d678f574fb09-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.143723 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pzpjk" event={"ID":"90127ad8-6a3a-402c-809f-d678f574fb09","Type":"ContainerDied","Data":"0d71bdee8585ec27455a6d47fbeedfe67058e5949c8fcacd45be1a720e674803"}
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.143769 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d71bdee8585ec27455a6d47fbeedfe67058e5949c8fcacd45be1a720e674803"
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.143857 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pzpjk"
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.152467 4689 generic.go:334] "Generic (PLEG): container finished" podID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerID="fa3ba13303d547324a172573d591fbd540757eae070bba4764b2bec8576df568" exitCode=0
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.152529 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" event={"ID":"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd","Type":"ContainerDied","Data":"fa3ba13303d547324a172573d591fbd540757eae070bba4764b2bec8576df568"}
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.426782 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr"
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.507821 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7vvq\" (UniqueName: \"kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.507919 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.508332 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.508433 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.508479 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.508543 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.508710 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam\") pod \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\" (UID: \"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd\") "
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.516726 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq" (OuterVolumeSpecName: "kube-api-access-d7vvq") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "kube-api-access-d7vvq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.584861 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.590422 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.591561 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config" (OuterVolumeSpecName: "config") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.594812 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.602754 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.611306 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" (UID: "a9f72aa5-bcec-4d2d-904f-e08310d8ddcd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612613 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612859 4689 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-config\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612874 4689 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612887 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612900 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7vvq\" (UniqueName: \"kubernetes.io/projected/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-kube-api-access-d7vvq\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612918 4689 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:39 crc kubenswrapper[4689]: I0123 11:17:39.612956 4689 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.163620 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7db46dddd6-txhvk"]
Jan 23 11:17:40 crc kubenswrapper[4689]: E0123 11:17:40.164408 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="init"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.164485 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="init"
Jan 23 11:17:40 crc kubenswrapper[4689]: E0123 11:17:40.164558 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.164615 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: E0123 11:17:40.164681 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="init"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.164734 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="init"
Jan 23 11:17:40 crc kubenswrapper[4689]: E0123 11:17:40.164793 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" containerName="heat-db-sync"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.164884 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" containerName="heat-db-sync"
Jan 23 11:17:40 crc kubenswrapper[4689]: E0123 11:17:40.164954 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.165008 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.165367 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.165458 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b26c1980-a23a-4a11-a321-11fe4bd6b641" containerName="dnsmasq-dns"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.165528 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" containerName="heat-db-sync"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.166428 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.170110 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7db46dddd6-txhvk"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.204857 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr" event={"ID":"a9f72aa5-bcec-4d2d-904f-e08310d8ddcd","Type":"ContainerDied","Data":"2e4a437e58219bd8d50f7b67f2630633e05a407df23ae0283d81eed55e2a9368"}
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.204923 4689 scope.go:117] "RemoveContainer" containerID="fa3ba13303d547324a172573d591fbd540757eae070bba4764b2bec8576df568"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.204933 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-594cb89c79-jfqrr"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.226980 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-combined-ca-bundle\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.227083 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.227398 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkrtn\" (UniqueName: \"kubernetes.io/projected/222ae852-00ad-449b-a92b-b0f52d2b856f-kube-api-access-hkrtn\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.227524 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data-custom\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.232827 4689 scope.go:117] "RemoveContainer" containerID="ff244f4796cc8efd93a4c881274e705507687239348dac01db3bf0f756a3ce65"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.307969 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.332574 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data-custom\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.332774 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-combined-ca-bundle\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.332936 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.333028 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkrtn\" (UniqueName: \"kubernetes.io/projected/222ae852-00ad-449b-a92b-b0f52d2b856f-kube-api-access-hkrtn\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.347230 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-combined-ca-bundle\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.348179 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.353949 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/222ae852-00ad-449b-a92b-b0f52d2b856f-config-data-custom\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.357485 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkrtn\" (UniqueName: \"kubernetes.io/projected/222ae852-00ad-449b-a92b-b0f52d2b856f-kube-api-access-hkrtn\") pod \"heat-engine-7db46dddd6-txhvk\" (UID: \"222ae852-00ad-449b-a92b-b0f52d2b856f\") " pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.360014 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-594cb89c79-jfqrr"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.404220 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-84b55874c-d6r8c"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.407601 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.409170 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.462380 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-79f99b898-dt7zp"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.466388 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.519602 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-84b55874c-d6r8c"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.521555 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.551572 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-79f99b898-dt7zp"]
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566281 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data-custom\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566368 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566415 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-internal-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566491 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-internal-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566585 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqvt9\" (UniqueName: \"kubernetes.io/projected/a256dd27-3435-4bcb-9ca0-46a0d472325b-kube-api-access-wqvt9\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566613 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-combined-ca-bundle\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566683 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-combined-ca-bundle\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566719 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566812 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-public-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.566853 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-public-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.567015 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdjt9\" (UniqueName: \"kubernetes.io/projected/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-kube-api-access-xdjt9\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.567078 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data-custom\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.669738 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-public-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.669784 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-public-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.669859 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdjt9\" (UniqueName: \"kubernetes.io/projected/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-kube-api-access-xdjt9\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.669897 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data-custom\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.669953 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data-custom\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670005 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670030 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-internal-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670070 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-internal-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670112 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqvt9\" (UniqueName: \"kubernetes.io/projected/a256dd27-3435-4bcb-9ca0-46a0d472325b-kube-api-access-wqvt9\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670127 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-combined-ca-bundle\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670182 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-combined-ca-bundle\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.670205 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.680066 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data-custom\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.683366 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-internal-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.688106 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-config-data\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.689011 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-combined-ca-bundle\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.693821 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-internal-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.694048 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-public-tls-certs\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.695999 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data-custom\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.699938 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdjt9\" (UniqueName: \"kubernetes.io/projected/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-kube-api-access-xdjt9\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.701417 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-config-data\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.702349 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqvt9\" (UniqueName: \"kubernetes.io/projected/a256dd27-3435-4bcb-9ca0-46a0d472325b-kube-api-access-wqvt9\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.715413 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1-combined-ca-bundle\") pod \"heat-cfnapi-79f99b898-dt7zp\" (UID: \"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1\") " pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.716518 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a256dd27-3435-4bcb-9ca0-46a0d472325b-public-tls-certs\") pod \"heat-api-84b55874c-d6r8c\" (UID: \"a256dd27-3435-4bcb-9ca0-46a0d472325b\") " pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.763992 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:40 crc kubenswrapper[4689]: I0123 11:17:40.856106 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:41 crc kubenswrapper[4689]: I0123 11:17:41.025301 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7db46dddd6-txhvk"]
Jan 23 11:17:41 crc kubenswrapper[4689]: I0123 11:17:41.216777 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7db46dddd6-txhvk" event={"ID":"222ae852-00ad-449b-a92b-b0f52d2b856f","Type":"ContainerStarted","Data":"fd108a9f685ae55396fe7399479954334ca8434507ecd04e061a0b10edebd5fc"}
Jan 23 11:17:41 crc kubenswrapper[4689]: W0123 11:17:41.295465 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda256dd27_3435_4bcb_9ca0_46a0d472325b.slice/crio-e277889e2357adf53864c4d3f20f618726e4024b58531f3a8fd8079a3e7e7be6 WatchSource:0}: Error finding container e277889e2357adf53864c4d3f20f618726e4024b58531f3a8fd8079a3e7e7be6: Status 404 returned error can't find the container with id e277889e2357adf53864c4d3f20f618726e4024b58531f3a8fd8079a3e7e7be6
Jan 23 11:17:41 crc kubenswrapper[4689]: I0123 11:17:41.299114 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-84b55874c-d6r8c"]
Jan 23 11:17:41 crc kubenswrapper[4689]: I0123 11:17:41.462069 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-79f99b898-dt7zp"]
Jan 23 11:17:41 crc kubenswrapper[4689]: I0123 11:17:41.652732 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9f72aa5-bcec-4d2d-904f-e08310d8ddcd" path="/var/lib/kubelet/pods/a9f72aa5-bcec-4d2d-904f-e08310d8ddcd/volumes"
Jan 23 11:17:42 crc kubenswrapper[4689]: I0123 11:17:42.230510 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-84b55874c-d6r8c" event={"ID":"a256dd27-3435-4bcb-9ca0-46a0d472325b","Type":"ContainerStarted","Data":"e277889e2357adf53864c4d3f20f618726e4024b58531f3a8fd8079a3e7e7be6"}
Jan 23 11:17:42 crc kubenswrapper[4689]: I0123 11:17:42.231790 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79f99b898-dt7zp" event={"ID":"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1","Type":"ContainerStarted","Data":"b0313e4a5f6a394429497b065fb261b2824cb9d2a6cff2b06a3013d25d3a7f9a"}
Jan 23 11:17:46 crc kubenswrapper[4689]: I0123 11:17:46.291810 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7db46dddd6-txhvk" event={"ID":"222ae852-00ad-449b-a92b-b0f52d2b856f","Type":"ContainerStarted","Data":"412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84"}
Jan 23 11:17:46 crc kubenswrapper[4689]: I0123 11:17:46.292410 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:17:46 crc kubenswrapper[4689]: I0123 11:17:46.316662 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7db46dddd6-txhvk" podStartSLOduration=6.316642065 podStartE2EDuration="6.316642065s" podCreationTimestamp="2026-01-23 11:17:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:17:46.305139078 +0000 UTC m=+1730.929818937" watchObservedRunningTime="2026-01-23 11:17:46.316642065 +0000 UTC m=+1730.941321924"
Jan 23 11:17:47 crc kubenswrapper[4689]: I0123 11:17:47.641735 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:17:47 crc kubenswrapper[4689]: E0123 11:17:47.642737 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.318557 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-84b55874c-d6r8c" event={"ID":"a256dd27-3435-4bcb-9ca0-46a0d472325b","Type":"ContainerStarted","Data":"5e36ef42cfb1aea0a923884dc7c09b4d5a0ac90fefa46f47723b2435ccef958f"}
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.319011 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.320422 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79f99b898-dt7zp" event={"ID":"04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1","Type":"ContainerStarted","Data":"2cf12da8dc4af0b1503c5e798315a147a24913ba6b546641cd2b8e530d3e2d1e"}
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.320564 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.351652 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-84b55874c-d6r8c" podStartSLOduration=1.9666586339999999 podStartE2EDuration="8.351633551s" podCreationTimestamp="2026-01-23 11:17:40 +0000 UTC" firstStartedPulling="2026-01-23 11:17:41.298306251 +0000 UTC m=+1725.922986130" lastFinishedPulling="2026-01-23 11:17:47.683281178 +0000 UTC m=+1732.307961047" observedRunningTime="2026-01-23 11:17:48.343029977 +0000 UTC m=+1732.967709836" watchObservedRunningTime="2026-01-23 11:17:48.351633551 +0000 UTC m=+1732.976313410"
Jan 23 11:17:48 crc kubenswrapper[4689]: I0123 11:17:48.371657 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-79f99b898-dt7zp" podStartSLOduration=2.178519024 podStartE2EDuration="8.371633279s" podCreationTimestamp="2026-01-23 11:17:40 +0000 UTC" firstStartedPulling="2026-01-23 11:17:41.491208258 +0000 UTC m=+1726.115888117" lastFinishedPulling="2026-01-23 11:17:47.684322513 +0000 UTC m=+1732.309002372" observedRunningTime="2026-01-23 11:17:48.358767289 +0000 UTC m=+1732.983447158" watchObservedRunningTime="2026-01-23 11:17:48.371633279 +0000 UTC m=+1732.996313138"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.760685 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"]
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.762646 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.764793 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.764816 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.765719 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.767164 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.777340 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"]
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.853531 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmfht\" (UniqueName: \"kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.853578 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.853615 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.853829 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.956682 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmfht\" (UniqueName: \"kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.956762 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.956814 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.957223 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.963863 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.964565 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.969844 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:51 crc kubenswrapper[4689]: I0123 11:17:51.979976 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmfht\" (UniqueName: \"kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:52 crc kubenswrapper[4689]: I0123 11:17:52.086868 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"
Jan 23 11:17:52 crc kubenswrapper[4689]: W0123 11:17:52.881796 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e40e6bf_2a52_4686_a459_50df12dfb406.slice/crio-5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e WatchSource:0}: Error finding container 5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e: Status 404 returned error can't find the container with id 5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e
Jan 23 11:17:52 crc kubenswrapper[4689]: I0123 11:17:52.882896 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh"]
Jan 23 11:17:53 crc kubenswrapper[4689]: I0123 11:17:53.381706 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" event={"ID":"5e40e6bf-2a52-4686-a459-50df12dfb406","Type":"ContainerStarted","Data":"5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e"}
Jan 23 11:17:56 crc kubenswrapper[4689]: I0123 11:17:56.451051 4689 generic.go:334] "Generic (PLEG): container finished" podID="907261fc-5550-4dd8-b645-0341b4bdd4de" containerID="a6eaf0101ac6528cf5089f88344868ea01dca898a4fec0fce4ca15383e1dd55f" exitCode=0
Jan 23 11:17:56 crc kubenswrapper[4689]: I0123 11:17:56.451168 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"907261fc-5550-4dd8-b645-0341b4bdd4de","Type":"ContainerDied","Data":"a6eaf0101ac6528cf5089f88344868ea01dca898a4fec0fce4ca15383e1dd55f"}
Jan 23 11:17:56 crc kubenswrapper[4689]: I0123 11:17:56.468598 4689 generic.go:334] "Generic (PLEG): container finished" podID="5f60dabd-d5a7-417e-a9f4-2a9f06e4778d" containerID="b2084a217bf424b3e554137b9579924718e2f7908539928912fa079c4a2042d9" exitCode=0
Jan 23 11:17:56 crc kubenswrapper[4689]: I0123 11:17:56.468834 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d","Type":"ContainerDied","Data":"b2084a217bf424b3e554137b9579924718e2f7908539928912fa079c4a2042d9"}
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.499289 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"907261fc-5550-4dd8-b645-0341b4bdd4de","Type":"ContainerStarted","Data":"f509bb46f70bf9a80fa1276b44994dac422251a6b0313a7edf4d771af3d8c74c"}
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.500038 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2"
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.502211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5f60dabd-d5a7-417e-a9f4-2a9f06e4778d","Type":"ContainerStarted","Data":"28f790982d442a05444cf27de39a05538d624b460f1151926ba6246d2804e1ac"}
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.503049 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.530449 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=36.530433452 podStartE2EDuration="36.530433452s" podCreationTimestamp="2026-01-23 11:17:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:17:57.528849703 +0000 UTC m=+1742.153529562" watchObservedRunningTime="2026-01-23 11:17:57.530433452 +0000 UTC m=+1742.155113311"
Jan 23 11:17:57 crc kubenswrapper[4689]: I0123 11:17:57.568021 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.567988717 podStartE2EDuration="37.567988717s" podCreationTimestamp="2026-01-23 11:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:17:57.563900046 +0000 UTC m=+1742.188579905" watchObservedRunningTime="2026-01-23 11:17:57.567988717 +0000 UTC m=+1742.192668576"
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.300655 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-84b55874c-d6r8c"
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.402542 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"]
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.402765 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5fbcd48894-jz4gg" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" containerID="cri-o://c7541d58f36f5a6d744f87fb54b40c1db0c29b2e35533d5d8be8ef20959b28ad" gracePeriod=60
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.408652 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-79f99b898-dt7zp"
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.490776 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"]
Jan 23 11:17:58 crc kubenswrapper[4689]: I0123 11:17:58.491006 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" containerID="cri-o://3b8b75a4bd6042b7f5dbf9033caaa0336686db6e8c4eb7a3677138a36f8922c5" gracePeriod=60
Jan 23 11:17:59 crc kubenswrapper[4689]: I0123 11:17:59.640140 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:17:59 crc kubenswrapper[4689]: E0123 11:17:59.640788 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:18:01 crc kubenswrapper[4689]: I0123 11:18:01.666055 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-5fbcd48894-jz4gg" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.228:8004/healthcheck\": read tcp 10.217.0.2:39898->10.217.0.228:8004: read: connection reset by peer"
Jan 23 11:18:01 crc kubenswrapper[4689]: I0123 11:18:01.688431 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.229:8000/healthcheck\": read tcp 10.217.0.2:43266->10.217.0.229:8000: read: connection reset by peer"
Jan 23 11:18:03 crc kubenswrapper[4689]: I0123 11:18:03.055895 4689 scope.go:117] "RemoveContainer" containerID="4a9a449081748ed764ee2d33be3a3fc6da7857eac3c98ea4c36f4f6e79b58d07"
Jan 23 11:18:03 crc kubenswrapper[4689]: I0123 11:18:03.777553 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out"
Jan 23 11:18:03 crc kubenswrapper[4689]: I0123 11:18:03.782794 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out"
Jan 23 11:18:05 crc kubenswrapper[4689]: I0123 11:18:05.172249 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 3.175912536s: [/var/lib/containers/storage/overlay/71b2d9885076917b7db8538139222fa00dcc449ececcf9a437b0ef0c98d092e6/diff /var/log/pods/openstack_barbican-worker-78cb64f85f-sf2tl_a7920087-af57-4092-8d74-0bcb75fc9e9d/barbican-worker/0.log]; will not log again for this container unless duration exceeds 2s
Jan 23 11:18:05 crc kubenswrapper[4689]: I0123 11:18:05.209988 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7db46dddd6-txhvk"
Jan 23 11:18:05 crc kubenswrapper[4689]: I0123 11:18:05.315283 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"]
Jan 23 11:18:05 crc kubenswrapper[4689]: I0123 11:18:05.315559 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine" containerID="cri-o://5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" gracePeriod=60
Jan 23 11:18:06 crc kubenswrapper[4689]: I0123 11:18:06.291591 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-5fbcd48894-jz4gg" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.228:8004/healthcheck\": dial tcp 10.217.0.228:8004: connect: connection refused"
Jan 23 11:18:06 crc kubenswrapper[4689]: I0123 11:18:06.302739 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.229:8000/healthcheck\": dial tcp 10.217.0.229:8000: connect: connection refused"
Jan 23 11:18:06 crc kubenswrapper[4689]: I0123 11:18:06.778792 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out"
Jan 23 11:18:08 crc kubenswrapper[4689]: E0123 11:18:08.108482 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 11:18:08 crc kubenswrapper[4689]: E0123 11:18:08.114911 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 11:18:08 crc kubenswrapper[4689]: E0123 11:18:08.116643 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 11:18:08 crc kubenswrapper[4689]: E0123 11:18:08.116737 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.526643 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-kv9jt"]
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.554965 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-kv9jt"]
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.607891 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-fpbbp"]
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.609448 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.613878 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.630972 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-fpbbp"]
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.732452 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2ff2\" (UniqueName: \"kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.732917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.733210 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.733281 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.835432 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.835517 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.835563 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2ff2\" (UniqueName: \"kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.835643 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.843588 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.859119 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.859538 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.864695 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2ff2\" (UniqueName: \"kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2\") pod \"aodh-db-sync-fpbbp\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") " pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:08 crc kubenswrapper[4689]: I0123 11:18:08.961968 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:09 crc kubenswrapper[4689]: I0123 11:18:09.655101 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c" path="/var/lib/kubelet/pods/d6d8e7cf-c5cc-478e-a2e6-bf7fc584d70c/volumes"
Jan 23 11:18:10 crc kubenswrapper[4689]: I0123 11:18:10.641095 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:18:10 crc kubenswrapper[4689]: E0123 11:18:10.641761 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.292070 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-5fbcd48894-jz4gg" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.228:8004/healthcheck\": dial tcp 10.217.0.228:8004: connect: connection refused"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.292484 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5fbcd48894-jz4gg"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.301812 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.229:8000/healthcheck\": dial tcp 10.217.0.229:8000: connect: connection refused"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.302249 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.418752 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5f60dabd-d5a7-417e-a9f4-2a9f06e4778d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.17:5671: connect: connection refused"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.519289 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="907261fc-5550-4dd8-b645-0341b4bdd4de" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.18:5671: connect: connection refused"
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.701968 4689 generic.go:334] "Generic (PLEG): container finished" podID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerID="3b8b75a4bd6042b7f5dbf9033caaa0336686db6e8c4eb7a3677138a36f8922c5" exitCode=0
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.702053 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" event={"ID":"4b102ee6-4978-45ed-9026-94e1c433d3f6","Type":"ContainerDied","Data":"3b8b75a4bd6042b7f5dbf9033caaa0336686db6e8c4eb7a3677138a36f8922c5"}
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.704601 4689 generic.go:334] "Generic (PLEG): container finished" podID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerID="c7541d58f36f5a6d744f87fb54b40c1db0c29b2e35533d5d8be8ef20959b28ad" exitCode=0
Jan 23 11:18:11 crc
Jan 23 11:18:11 crc kubenswrapper[4689]: I0123 11:18:11.704639 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fbcd48894-jz4gg" event={"ID":"cd9d1a8d-fd0e-4155-8085-5584c456cecb","Type":"ContainerDied","Data":"c7541d58f36f5a6d744f87fb54b40c1db0c29b2e35533d5d8be8ef20959b28ad"}
Jan 23 11:18:14 crc kubenswrapper[4689]: E0123 11:18:14.705213 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest"
Jan 23 11:18:14 crc kubenswrapper[4689]: E0123 11:18:14.706014 4689 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 11:18:14 crc kubenswrapper[4689]: container &Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value:
Jan 23 11:18:14 crc kubenswrapper[4689]: - hosts: all
Jan 23 11:18:14 crc kubenswrapper[4689]: strategy: linear
Jan 23 11:18:14 crc kubenswrapper[4689]: tasks:
Jan 23 11:18:14 crc kubenswrapper[4689]: - name: Enable podified-repos
Jan 23 11:18:14 crc kubenswrapper[4689]: become: true
Jan 23 11:18:14 crc kubenswrapper[4689]: ansible.builtin.shell: |
Jan 23 11:18:14 crc kubenswrapper[4689]: set -euxo pipefail
Jan 23 11:18:14 crc kubenswrapper[4689]: pushd /var/tmp
Jan 23 11:18:14 crc kubenswrapper[4689]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz
Jan 23 11:18:14 crc kubenswrapper[4689]: pushd repo-setup-main
Jan 23 11:18:14 crc kubenswrapper[4689]: python3 -m venv ./venv
Jan 23 11:18:14 crc kubenswrapper[4689]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./
Jan 23 11:18:14 crc kubenswrapper[4689]: ./venv/bin/repo-setup current-podified -b antelope
Jan 23 11:18:14 crc kubenswrapper[4689]: popd
Jan 23 11:18:14 crc kubenswrapper[4689]: rm -rf repo-setup-main
Jan 23 11:18:14 crc kubenswrapper[4689]:
Jan 23 11:18:14 crc kubenswrapper[4689]:
Jan 23 11:18:14 crc kubenswrapper[4689]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value:
Jan 23 11:18:14 crc kubenswrapper[4689]: edpm_override_hosts: openstack-edpm-ipam
Jan 23 11:18:14 crc kubenswrapper[4689]: edpm_service_type: repo-setup
Jan 23 11:18:14 crc kubenswrapper[4689]:
Jan 23 11:18:14 crc kubenswrapper[4689]:
Jan 23 11:18:14 crc kubenswrapper[4689]: ,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key-openstack-edpm-ipam,ReadOnly:false,MountPath:/runner/env/ssh_key/ssh_key_openstack-edpm-ipam,SubPath:ssh_key_openstack-edpm-ipam,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pmfht,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh_openstack(5e40e6bf-2a52-4686-a459-50df12dfb406): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled
Jan 23 11:18:14 crc kubenswrapper[4689]: > logger="UnhandledError"
Jan 23 11:18:14 crc kubenswrapper[4689]: E0123 11:18:14.708875 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" podUID="5e40e6bf-2a52-4686-a459-50df12dfb406"
Jan 23 11:18:14 crc kubenswrapper[4689]: I0123 11:18:14.726838 4689 scope.go:117] "RemoveContainer" containerID="b62357a0edaa657342ce222555b749c18cdd4c1de955d1d28e60f15ab02b2887"
Jan 23 11:18:14 crc kubenswrapper[4689]: E0123 11:18:14.788425 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" podUID="5e40e6bf-2a52-4686-a459-50df12dfb406"
Jan 23 11:18:14 crc kubenswrapper[4689]: I0123 11:18:14.921366 4689 scope.go:117] "RemoveContainer" containerID="19f914e5c2f2934f55e25abc740a4c3371b6bd86f7e4fc9a8a3d1fd9e1246f62"
Jan 23 11:18:14 crc kubenswrapper[4689]: I0123 11:18:14.993018 4689 scope.go:117] "RemoveContainer" containerID="bfb1a1bc7ca07e3d7df7950723940e4b100dbaa2dc5fdb7419862d4cc47d96f8"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.020021 4689 scope.go:117] "RemoveContainer" containerID="5203fdef91227f2ea52817270e91a2758ad79b91487523ccdae108c74a347ffe"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.045225 4689 scope.go:117] "RemoveContainer" containerID="fa194d92b4d55899383f8e8fd404a9d907f9d724fd04054efc3c783551079984"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.081087 4689 scope.go:117] "RemoveContainer" containerID="477a61405d5cddaa22f592d37dc0b1a76176ff22521ab00ad5520c4c7c2d41ea"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.197195 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280321 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfnhn\" (UniqueName: \"kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280423 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280491 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280551 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280628 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.280896 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs\") pod \"4b102ee6-4978-45ed-9026-94e1c433d3f6\" (UID: \"4b102ee6-4978-45ed-9026-94e1c433d3f6\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.299444 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn" (OuterVolumeSpecName: "kube-api-access-jfnhn") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "kube-api-access-jfnhn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.355260 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.385020 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfnhn\" (UniqueName: \"kubernetes.io/projected/4b102ee6-4978-45ed-9026-94e1c433d3f6-kube-api-access-jfnhn\") on node \"crc\" DevicePath \"\""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.385049 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.392179 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fbcd48894-jz4gg"
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.411555 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.448038 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-fpbbp"]
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.457350 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.479071 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data" (OuterVolumeSpecName: "config-data") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.486367 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data-custom\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.486469 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.486555 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vt5q\" (UniqueName: \"kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.486593 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.487160 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.487379 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data\") pod \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\" (UID: \"cd9d1a8d-fd0e-4155-8085-5584c456cecb\") "
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.488241 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.488267 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.488280 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-internal-tls-certs\") on node \"crc\" DevicePath \"\""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.491259 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4b102ee6-4978-45ed-9026-94e1c433d3f6" (UID: "4b102ee6-4978-45ed-9026-94e1c433d3f6"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.491992 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q" (OuterVolumeSpecName: "kube-api-access-4vt5q") pod "cd9d1a8d-fd0e-4155-8085-5584c456cecb" (UID: "cd9d1a8d-fd0e-4155-8085-5584c456cecb"). InnerVolumeSpecName "kube-api-access-4vt5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.529403 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd9d1a8d-fd0e-4155-8085-5584c456cecb" (UID: "cd9d1a8d-fd0e-4155-8085-5584c456cecb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.569487 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cd9d1a8d-fd0e-4155-8085-5584c456cecb" (UID: "cd9d1a8d-fd0e-4155-8085-5584c456cecb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.574458 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data" (OuterVolumeSpecName: "config-data") pod "cd9d1a8d-fd0e-4155-8085-5584c456cecb" (UID: "cd9d1a8d-fd0e-4155-8085-5584c456cecb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590882 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b102ee6-4978-45ed-9026-94e1c433d3f6-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590910 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590920 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590929 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590938 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vt5q\" (UniqueName: \"kubernetes.io/projected/cd9d1a8d-fd0e-4155-8085-5584c456cecb-kube-api-access-4vt5q\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.590952 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.592300 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cd9d1a8d-fd0e-4155-8085-5584c456cecb" (UID: "cd9d1a8d-fd0e-4155-8085-5584c456cecb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.692864 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9d1a8d-fd0e-4155-8085-5584c456cecb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.791259 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-fpbbp" event={"ID":"30e7aa97-9923-4441-9a8a-5f320f5f3b85","Type":"ContainerStarted","Data":"bb6a8e5d8ecf7298b34e909fc3abb97f7969aabe91a9623c1ab914aee9352698"} Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.793552 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" event={"ID":"4b102ee6-4978-45ed-9026-94e1c433d3f6","Type":"ContainerDied","Data":"1e802ff88d1e2f8a5ae85c05e71005444ec3f63509b5692cfd714a6a0a4c0a37"} Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.793573 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-ffbc4d8cf-mv5hl" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.793610 4689 scope.go:117] "RemoveContainer" containerID="3b8b75a4bd6042b7f5dbf9033caaa0336686db6e8c4eb7a3677138a36f8922c5" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.797856 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fbcd48894-jz4gg" event={"ID":"cd9d1a8d-fd0e-4155-8085-5584c456cecb","Type":"ContainerDied","Data":"876be0fa5f4603f63ab2d41573c7aca0b4a89ad6c9a521ae65f93defcdf5fd92"} Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.797917 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fbcd48894-jz4gg" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.837203 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"] Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.849742 4689 scope.go:117] "RemoveContainer" containerID="c7541d58f36f5a6d744f87fb54b40c1db0c29b2e35533d5d8be8ef20959b28ad" Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.852664 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-ffbc4d8cf-mv5hl"] Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.864363 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"] Jan 23 11:18:15 crc kubenswrapper[4689]: I0123 11:18:15.874945 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5fbcd48894-jz4gg"] Jan 23 11:18:15 crc kubenswrapper[4689]: E0123 11:18:15.983393 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd9d1a8d_fd0e_4155_8085_5584c456cecb.slice/crio-876be0fa5f4603f63ab2d41573c7aca0b4a89ad6c9a521ae65f93defcdf5fd92\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b102ee6_4978_45ed_9026_94e1c433d3f6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b102ee6_4978_45ed_9026_94e1c433d3f6.slice/crio-1e802ff88d1e2f8a5ae85c05e71005444ec3f63509b5692cfd714a6a0a4c0a37\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd9d1a8d_fd0e_4155_8085_5584c456cecb.slice\": RecentStats: unable to find data in memory cache]" Jan 23 11:18:17 crc kubenswrapper[4689]: I0123 11:18:17.655449 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" path="/var/lib/kubelet/pods/4b102ee6-4978-45ed-9026-94e1c433d3f6/volumes" Jan 23 11:18:17 crc kubenswrapper[4689]: I0123 11:18:17.657825 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" path="/var/lib/kubelet/pods/cd9d1a8d-fd0e-4155-8085-5584c456cecb/volumes" Jan 23 11:18:18 crc kubenswrapper[4689]: E0123 11:18:18.107167 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431 is running failed: container process not found" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:18:18 crc kubenswrapper[4689]: E0123 
11:18:18.108537 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431 is running failed: container process not found" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:18:18 crc kubenswrapper[4689]: E0123 11:18:18.108829 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431 is running failed: container process not found" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 11:18:18 crc kubenswrapper[4689]: E0123 11:18:18.108891 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431 is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine" Jan 23 11:18:18 crc kubenswrapper[4689]: I0123 11:18:18.839791 4689 generic.go:334] "Generic (PLEG): container finished" podID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" exitCode=0 Jan 23 11:18:18 crc kubenswrapper[4689]: I0123 11:18:18.839832 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" event={"ID":"aa54a9d4-e837-442f-9d18-5e7b0a05e807","Type":"ContainerDied","Data":"5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431"} Jan 23 11:18:21 crc kubenswrapper[4689]: I0123 11:18:21.406386 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 23 11:18:21 crc kubenswrapper[4689]: I0123 11:18:21.519010 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Jan 23 11:18:21 crc kubenswrapper[4689]: I0123 11:18:21.626818 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.473862 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.584263 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle\") pod \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.584330 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skq7h\" (UniqueName: \"kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h\") pod \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.584416 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom\") pod \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.584706 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data\") pod \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\" (UID: \"aa54a9d4-e837-442f-9d18-5e7b0a05e807\") " Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.590264 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "aa54a9d4-e837-442f-9d18-5e7b0a05e807" (UID: "aa54a9d4-e837-442f-9d18-5e7b0a05e807"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.590597 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h" (OuterVolumeSpecName: "kube-api-access-skq7h") pod "aa54a9d4-e837-442f-9d18-5e7b0a05e807" (UID: "aa54a9d4-e837-442f-9d18-5e7b0a05e807"). InnerVolumeSpecName "kube-api-access-skq7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.618540 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa54a9d4-e837-442f-9d18-5e7b0a05e807" (UID: "aa54a9d4-e837-442f-9d18-5e7b0a05e807"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.645459 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data" (OuterVolumeSpecName: "config-data") pod "aa54a9d4-e837-442f-9d18-5e7b0a05e807" (UID: "aa54a9d4-e837-442f-9d18-5e7b0a05e807"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.688292 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.688325 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.688338 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skq7h\" (UniqueName: \"kubernetes.io/projected/aa54a9d4-e837-442f-9d18-5e7b0a05e807-kube-api-access-skq7h\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.688350 4689 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aa54a9d4-e837-442f-9d18-5e7b0a05e807-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.911216 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-fpbbp" event={"ID":"30e7aa97-9923-4441-9a8a-5f320f5f3b85","Type":"ContainerStarted","Data":"92444a0342b62ea610a7f32f254b21f34bf6d604c290564fc653d7afce9f5f14"} Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.916297 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" event={"ID":"aa54a9d4-e837-442f-9d18-5e7b0a05e807","Type":"ContainerDied","Data":"6a9438c1d04252e4663af9cc2311c89f3f2d4afe29396ec1aebc8b47f77692c6"} Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.916366 4689 scope.go:117] "RemoveContainer" containerID="5f348e53e4c02f36c00ddb7f50f0998899ed13de3d8b2a1d0871d9140726b431" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.916627 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6b6f6df6f9-dvwpc" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.943789 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-fpbbp" podStartSLOduration=8.294085863 podStartE2EDuration="16.943767449s" podCreationTimestamp="2026-01-23 11:18:08 +0000 UTC" firstStartedPulling="2026-01-23 11:18:15.47529925 +0000 UTC m=+1760.099979109" lastFinishedPulling="2026-01-23 11:18:24.124980836 +0000 UTC m=+1768.749660695" observedRunningTime="2026-01-23 11:18:24.931529163 +0000 UTC m=+1769.556209022" watchObservedRunningTime="2026-01-23 11:18:24.943767449 +0000 UTC m=+1769.568447308" Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.979383 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"] Jan 23 11:18:24 crc kubenswrapper[4689]: I0123 11:18:24.990660 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6b6f6df6f9-dvwpc"] Jan 23 11:18:25 crc kubenswrapper[4689]: I0123 11:18:25.646707 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:18:25 crc kubenswrapper[4689]: E0123 11:18:25.647316 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:18:25 crc kubenswrapper[4689]: I0123 11:18:25.656823 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" path="/var/lib/kubelet/pods/aa54a9d4-e837-442f-9d18-5e7b0a05e807/volumes" Jan 23 11:18:27 crc kubenswrapper[4689]: I0123 11:18:27.706494 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" containerID="cri-o://3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf" gracePeriod=604794 Jan 23 11:18:29 crc kubenswrapper[4689]: I0123 11:18:29.203392 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:18:30 crc kubenswrapper[4689]: I0123 11:18:30.976374 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Jan 23 11:18:31 crc kubenswrapper[4689]: I0123 11:18:31.762369 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:18:32 crc kubenswrapper[4689]: I0123 11:18:32.965296 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 3.980614506s: 
[/var/lib/containers/storage/overlay/fa135c16c77a8c1f46115556ce8b2c96359beb2517738c826a7965a62a608160/diff /var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-pmksl_dc66c81d-1ea4-494e-99d4-1fbdb64a4a47/multus-admission-controller/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 11:18:34 crc kubenswrapper[4689]: I0123 11:18:34.447351 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.211:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:18:41 crc kubenswrapper[4689]: I0123 11:18:40.640738 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:18:41 crc kubenswrapper[4689]: E0123 11:18:40.641698 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:18:41 crc kubenswrapper[4689]: I0123 11:18:40.976875 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Jan 23 11:18:41 crc kubenswrapper[4689]: I0123 11:18:41.304201 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:18:42 crc kubenswrapper[4689]: I0123 11:18:42.205834 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" event={"ID":"5e40e6bf-2a52-4686-a459-50df12dfb406","Type":"ContainerStarted","Data":"3c8d2273491199afc34319dad682650dd10a8a23d611be0aa4f84eb331c62104"} Jan 23 11:18:42 crc kubenswrapper[4689]: I0123 11:18:42.232605 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" podStartSLOduration=2.818390628 podStartE2EDuration="51.232585639s" podCreationTimestamp="2026-01-23 11:17:51 +0000 UTC" firstStartedPulling="2026-01-23 11:17:52.886619611 +0000 UTC m=+1737.511299490" lastFinishedPulling="2026-01-23 11:18:41.300814642 +0000 UTC m=+1785.925494501" observedRunningTime="2026-01-23 11:18:42.225505872 +0000 UTC m=+1786.850185731" watchObservedRunningTime="2026-01-23 11:18:42.232585639 +0000 UTC m=+1786.857265498" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.218582 4689 generic.go:334] "Generic (PLEG): container finished" podID="30e7aa97-9923-4441-9a8a-5f320f5f3b85" containerID="92444a0342b62ea610a7f32f254b21f34bf6d604c290564fc653d7afce9f5f14" exitCode=0 Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.218640 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-fpbbp" event={"ID":"30e7aa97-9923-4441-9a8a-5f320f5f3b85","Type":"ContainerDied","Data":"92444a0342b62ea610a7f32f254b21f34bf6d604c290564fc653d7afce9f5f14"} Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.742183 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.892957 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893325 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893409 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893434 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893604 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893680 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.893982 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894361 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894505 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894569 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894677 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894731 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894792 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jd68k\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k\") pod \"cee46a2e-5707-4ade-a456-ed3466f9e969\" (UID: \"cee46a2e-5707-4ade-a456-ed3466f9e969\") " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.896047 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.896083 4689 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.894789 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.899748 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.904281 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.918575 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k" (OuterVolumeSpecName: "kube-api-access-jd68k") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "kube-api-access-jd68k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.921072 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info" (OuterVolumeSpecName: "pod-info") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.938133 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f" (OuterVolumeSpecName: "persistence") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.945777 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data" (OuterVolumeSpecName: "config-data") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.978748 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf" (OuterVolumeSpecName: "server-conf") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998073 4689 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cee46a2e-5707-4ade-a456-ed3466f9e969-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998119 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998134 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998165 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998206 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") on node \"crc\" " Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998225 4689 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cee46a2e-5707-4ade-a456-ed3466f9e969-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998241 4689 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cee46a2e-5707-4ade-a456-ed3466f9e969-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:43 crc kubenswrapper[4689]: I0123 11:18:43.998254 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jd68k\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-kube-api-access-jd68k\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.032232 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.032619 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f") on node "crc" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.041902 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "cee46a2e-5707-4ade-a456-ed3466f9e969" (UID: "cee46a2e-5707-4ade-a456-ed3466f9e969"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.102117 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.102186 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cee46a2e-5707-4ade-a456-ed3466f9e969-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.234199 4689 generic.go:334] "Generic (PLEG): container finished" podID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerID="3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf" exitCode=0 Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.234287 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.234333 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerDied","Data":"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf"} Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.234366 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"cee46a2e-5707-4ade-a456-ed3466f9e969","Type":"ContainerDied","Data":"333dd15934864753703a26671b7adafe9452ed8636fcc61fb6aa2465956d0ad5"} Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.234387 4689 scope.go:117] "RemoveContainer" containerID="3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.277558 4689 scope.go:117] "RemoveContainer" containerID="56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.305076 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.347324 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.352610 4689 scope.go:117] "RemoveContainer" containerID="3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.352910 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf\": container with ID starting with 3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf not found: ID does not exist" containerID="3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.352938 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf"} err="failed to get container status \"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf\": rpc error: code = NotFound desc = could not find container \"3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf\": container with ID starting with 3832edf61c56a33e988cbdfe317cdffc4319f08a158fcfbda1f03e96d2b506cf not found: 
ID does not exist" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.352963 4689 scope.go:117] "RemoveContainer" containerID="56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.353166 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482\": container with ID starting with 56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482 not found: ID does not exist" containerID="56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.353185 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482"} err="failed to get container status \"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482\": rpc error: code = NotFound desc = could not find container \"56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482\": container with ID starting with 56f7b56ee8f4d3d3500b2dad839285f6cad144e08051e28d1512215f9091d482 not found: ID does not exist" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361284 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.361782 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361800 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.361825 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361832 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.361839 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361846 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.361862 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361868 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" Jan 23 11:18:44 crc kubenswrapper[4689]: E0123 11:18:44.361889 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="setup-container" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.361895 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="setup-container" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.362081 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b102ee6-4978-45ed-9026-94e1c433d3f6" containerName="heat-cfnapi" Jan 23 
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.362096 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd9d1a8d-fd0e-4155-8085-5584c456cecb" containerName="heat-api"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.362119 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" containerName="rabbitmq"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.362133 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa54a9d4-e837-442f-9d18-5e7b0a05e807" containerName="heat-engine"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.363376 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.406000 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"]
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512561 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-config-data\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512655 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/06df4649-6b5c-4a19-be98-7603002120de-pod-info\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512700 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512777 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512831 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512914 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-server-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.512962 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.513005 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvqfk\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-kube-api-access-xvqfk\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.513029 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.513049 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.513116 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/06df4649-6b5c-4a19-be98-7603002120de-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.615845 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-config-data\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.615925 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/06df4649-6b5c-4a19-be98-7603002120de-pod-info\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.615947 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616000 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616038 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616091 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-server-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616120 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616138 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvqfk\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-kube-api-access-xvqfk\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616179 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616196 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.616240 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/06df4649-6b5c-4a19-be98-7603002120de-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.617666 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.619115 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-config-data\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.621857 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
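Each volume of the replacement pod 06df4649-6b5c-4a19-be98-7603002120de walks the same ladder above: VerifyControllerAttachedVolume, then MountVolume started, then (for the CSI volume only) a MountDevice step, then MountVolume.SetUp succeeded. A small sketch that checks this per-volume ordering over events taken from the excerpt (a simplified model, not the volume manager's reconciler):

package main

import "fmt"

// phaseIndex maps the reconciler messages seen above to their expected order.
var phaseIndex = map[string]int{
	"VerifyControllerAttachedVolume started": 0,
	"MountVolume started":                    1,
	"MountVolume.MountDevice succeeded":      2, // CSI volumes only
	"MountVolume.SetUp succeeded":            3,
}

type event struct{ volume, phase string }

func main() {
	// Events in the order they appear in the excerpt for two of the volumes.
	events := []event{
		{"config-data", "VerifyControllerAttachedVolume started"},
		{"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f", "VerifyControllerAttachedVolume started"},
		{"config-data", "MountVolume started"},
		{"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f", "MountVolume started"},
		{"config-data", "MountVolume.SetUp succeeded"},
		{"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f", "MountVolume.MountDevice succeeded"},
		{"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f", "MountVolume.SetUp succeeded"},
	}
	last := map[string]int{}
	for _, e := range events {
		idx := phaseIndex[e.phase]
		if prev, seen := last[e.volume]; seen && idx <= prev {
			fmt.Printf("out of order for %s: %s\n", e.volume, e.phase)
			continue
		}
		last[e.volume] = idx
		fmt.Printf("%s: %s (ok)\n", e.volume, e.phase)
	}
}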
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.621895 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/501d83a3da2ec14b1d180069419aef4deb1d58e5f74a36ce9437297357b54132/globalmount\"" pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.623753 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.627197 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.630853 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.631903 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/06df4649-6b5c-4a19-be98-7603002120de-server-conf\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.634825 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.638732 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/06df4649-6b5c-4a19-be98-7603002120de-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.642513 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvqfk\" (UniqueName: \"kubernetes.io/projected/06df4649-6b5c-4a19-be98-7603002120de-kube-api-access-xvqfk\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.648307 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/06df4649-6b5c-4a19-be98-7603002120de-pod-info\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.713909 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e677f00-644a-44e5-a8fd-406a6db49a3f\") pod \"rabbitmq-server-1\" (UID: \"06df4649-6b5c-4a19-be98-7603002120de\") " pod="openstack/rabbitmq-server-1"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.769543 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-fpbbp"
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.923456 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle\") pod \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") "
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.923655 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2ff2\" (UniqueName: \"kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2\") pod \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") "
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.923799 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts\") pod \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") "
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.923940 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data\") pod \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\" (UID: \"30e7aa97-9923-4441-9a8a-5f320f5f3b85\") "
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.929471 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2" (OuterVolumeSpecName: "kube-api-access-s2ff2") pod "30e7aa97-9923-4441-9a8a-5f320f5f3b85" (UID: "30e7aa97-9923-4441-9a8a-5f320f5f3b85"). InnerVolumeSpecName "kube-api-access-s2ff2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.934084 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts" (OuterVolumeSpecName: "scripts") pod "30e7aa97-9923-4441-9a8a-5f320f5f3b85" (UID: "30e7aa97-9923-4441-9a8a-5f320f5f3b85"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.970576 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30e7aa97-9923-4441-9a8a-5f320f5f3b85" (UID: "30e7aa97-9923-4441-9a8a-5f320f5f3b85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:44 crc kubenswrapper[4689]: I0123 11:18:44.980077 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data" (OuterVolumeSpecName: "config-data") pod "30e7aa97-9923-4441-9a8a-5f320f5f3b85" (UID: "30e7aa97-9923-4441-9a8a-5f320f5f3b85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.012787 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.027467 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2ff2\" (UniqueName: \"kubernetes.io/projected/30e7aa97-9923-4441-9a8a-5f320f5f3b85-kube-api-access-s2ff2\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.027514 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.027527 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.027542 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30e7aa97-9923-4441-9a8a-5f320f5f3b85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.271968 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-fpbbp" event={"ID":"30e7aa97-9923-4441-9a8a-5f320f5f3b85","Type":"ContainerDied","Data":"bb6a8e5d8ecf7298b34e909fc3abb97f7969aabe91a9623c1ab914aee9352698"} Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.272009 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb6a8e5d8ecf7298b34e909fc3abb97f7969aabe91a9623c1ab914aee9352698" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.272010 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-fpbbp" Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.559458 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 23 11:18:45 crc kubenswrapper[4689]: I0123 11:18:45.662738 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cee46a2e-5707-4ade-a456-ed3466f9e969" path="/var/lib/kubelet/pods/cee46a2e-5707-4ade-a456-ed3466f9e969/volumes" Jan 23 11:18:46 crc kubenswrapper[4689]: I0123 11:18:46.286809 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"06df4649-6b5c-4a19-be98-7603002120de","Type":"ContainerStarted","Data":"fdb3df4a670fbcae1a30e9025cb166bf04cacc43fb8ca2942699134eb7cbd7c7"} Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.318120 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"06df4649-6b5c-4a19-be98-7603002120de","Type":"ContainerStarted","Data":"15295d66927ff9362771ed0968025542f9cb60ed8136bb88c006e421195a24dc"} Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.751723 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.752100 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-api" containerID="cri-o://a6fa46dda54d4ed6635c14115c0b6b4f1a6c42f9b8add809b4c4c971dbf09770" gracePeriod=30 Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.752186 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-listener" containerID="cri-o://d7eb457d141daaffcbc25cecd6691b58c561f23f0d41bf8a66bfbe7e221f9ae4" gracePeriod=30 Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.752228 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-notifier" containerID="cri-o://b3eb9888537db550255e9740628446c590f1bf10711b85c6439a1820f8508b9a" gracePeriod=30 Jan 23 11:18:48 crc kubenswrapper[4689]: I0123 11:18:48.752495 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-evaluator" containerID="cri-o://fb8f34e239b8e2e87141d2a0459740b0f68b78f77665e440b49dad16ef547bf6" gracePeriod=30 Jan 23 11:18:50 crc kubenswrapper[4689]: I0123 11:18:50.340390 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b41a12f-8d19-4604-9105-8d833e25c268" containerID="fb8f34e239b8e2e87141d2a0459740b0f68b78f77665e440b49dad16ef547bf6" exitCode=0 Jan 23 11:18:50 crc kubenswrapper[4689]: I0123 11:18:50.341014 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b41a12f-8d19-4604-9105-8d833e25c268" containerID="a6fa46dda54d4ed6635c14115c0b6b4f1a6c42f9b8add809b4c4c971dbf09770" exitCode=0 Jan 23 11:18:50 crc kubenswrapper[4689]: I0123 11:18:50.340475 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerDied","Data":"fb8f34e239b8e2e87141d2a0459740b0f68b78f77665e440b49dad16ef547bf6"} Jan 23 11:18:50 crc kubenswrapper[4689]: I0123 11:18:50.341068 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerDied","Data":"a6fa46dda54d4ed6635c14115c0b6b4f1a6c42f9b8add809b4c4c971dbf09770"} Jan 23 11:18:53 crc kubenswrapper[4689]: I0123 11:18:53.392344 4689 generic.go:334] "Generic (PLEG): container finished" podID="5e40e6bf-2a52-4686-a459-50df12dfb406" containerID="3c8d2273491199afc34319dad682650dd10a8a23d611be0aa4f84eb331c62104" exitCode=0 Jan 23 11:18:53 crc kubenswrapper[4689]: I0123 11:18:53.392822 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" event={"ID":"5e40e6bf-2a52-4686-a459-50df12dfb406","Type":"ContainerDied","Data":"3c8d2273491199afc34319dad682650dd10a8a23d611be0aa4f84eb331c62104"} Jan 23 11:18:54 crc kubenswrapper[4689]: I0123 11:18:54.405600 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b41a12f-8d19-4604-9105-8d833e25c268" containerID="b3eb9888537db550255e9740628446c590f1bf10711b85c6439a1820f8508b9a" exitCode=0 Jan 23 11:18:54 crc kubenswrapper[4689]: I0123 11:18:54.405678 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerDied","Data":"b3eb9888537db550255e9740628446c590f1bf10711b85c6439a1820f8508b9a"} Jan 23 11:18:54 crc kubenswrapper[4689]: I0123 11:18:54.874106 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.000101 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmfht\" (UniqueName: \"kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht\") pod \"5e40e6bf-2a52-4686-a459-50df12dfb406\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.000175 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle\") pod \"5e40e6bf-2a52-4686-a459-50df12dfb406\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.000315 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory\") pod \"5e40e6bf-2a52-4686-a459-50df12dfb406\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.000345 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam\") pod \"5e40e6bf-2a52-4686-a459-50df12dfb406\" (UID: \"5e40e6bf-2a52-4686-a459-50df12dfb406\") " Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.010775 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht" (OuterVolumeSpecName: "kube-api-access-pmfht") pod "5e40e6bf-2a52-4686-a459-50df12dfb406" (UID: "5e40e6bf-2a52-4686-a459-50df12dfb406"). InnerVolumeSpecName "kube-api-access-pmfht". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.018901 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "5e40e6bf-2a52-4686-a459-50df12dfb406" (UID: "5e40e6bf-2a52-4686-a459-50df12dfb406"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.070311 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory" (OuterVolumeSpecName: "inventory") pod "5e40e6bf-2a52-4686-a459-50df12dfb406" (UID: "5e40e6bf-2a52-4686-a459-50df12dfb406"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.110071 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmfht\" (UniqueName: \"kubernetes.io/projected/5e40e6bf-2a52-4686-a459-50df12dfb406-kube-api-access-pmfht\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.110125 4689 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.110142 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.140348 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5e40e6bf-2a52-4686-a459-50df12dfb406" (UID: "5e40e6bf-2a52-4686-a459-50df12dfb406"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.213253 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5e40e6bf-2a52-4686-a459-50df12dfb406-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.416841 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" event={"ID":"5e40e6bf-2a52-4686-a459-50df12dfb406","Type":"ContainerDied","Data":"5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e"} Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.416888 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5153ea9bfd5c938ca4a0c5612d7ff279d6fc466b50807f3ec911357563b6bb7e" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.416996 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.500312 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm"] Jan 23 11:18:55 crc kubenswrapper[4689]: E0123 11:18:55.500960 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e40e6bf-2a52-4686-a459-50df12dfb406" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.500985 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e40e6bf-2a52-4686-a459-50df12dfb406" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 23 11:18:55 crc kubenswrapper[4689]: E0123 11:18:55.501042 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30e7aa97-9923-4441-9a8a-5f320f5f3b85" containerName="aodh-db-sync" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.501052 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="30e7aa97-9923-4441-9a8a-5f320f5f3b85" containerName="aodh-db-sync" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.501402 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e40e6bf-2a52-4686-a459-50df12dfb406" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.501436 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="30e7aa97-9923-4441-9a8a-5f320f5f3b85" containerName="aodh-db-sync" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.502489 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.504477 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.505976 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.506345 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.506581 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.555582 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm"] Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.622665 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58gg8\" (UniqueName: \"kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.622751 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.623063 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.648685 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:18:55 crc kubenswrapper[4689]: E0123 11:18:55.649022 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.726242 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.726759 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58gg8\" (UniqueName: \"kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.726915 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.735687 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.736050 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.744920 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.758367 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58gg8\" (UniqueName: 
\"kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.768024 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xspnm\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.853123 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:18:55 crc kubenswrapper[4689]: I0123 11:18:55.856517 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.405037 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm"] Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.429259 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" event={"ID":"449fef30-676e-47a0-b1ea-8e5922146176","Type":"ContainerStarted","Data":"631e27b1c1f9dfa27960258619a5f0e4de55f282262cbbbb0fdbed78d1764b30"} Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.433549 4689 generic.go:334] "Generic (PLEG): container finished" podID="6b41a12f-8d19-4604-9105-8d833e25c268" containerID="d7eb457d141daaffcbc25cecd6691b58c561f23f0d41bf8a66bfbe7e221f9ae4" exitCode=0 Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.433595 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerDied","Data":"d7eb457d141daaffcbc25cecd6691b58c561f23f0d41bf8a66bfbe7e221f9ae4"} Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.602114 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649013 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649076 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdggn\" (UniqueName: \"kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649281 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649333 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649363 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.649378 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts\") pod \"6b41a12f-8d19-4604-9105-8d833e25c268\" (UID: \"6b41a12f-8d19-4604-9105-8d833e25c268\") " Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.658602 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts" (OuterVolumeSpecName: "scripts") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.667675 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn" (OuterVolumeSpecName: "kube-api-access-cdggn") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "kube-api-access-cdggn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.746829 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.753053 4689 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.753207 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.753311 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdggn\" (UniqueName: \"kubernetes.io/projected/6b41a12f-8d19-4604-9105-8d833e25c268-kube-api-access-cdggn\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.792089 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.840211 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data" (OuterVolumeSpecName: "config-data") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.856923 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b41a12f-8d19-4604-9105-8d833e25c268" (UID: "6b41a12f-8d19-4604-9105-8d833e25c268"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.859111 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.859168 4689 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.859185 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b41a12f-8d19-4604-9105-8d833e25c268-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:18:56 crc kubenswrapper[4689]: I0123 11:18:56.877874 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.446574 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" event={"ID":"449fef30-676e-47a0-b1ea-8e5922146176","Type":"ContainerStarted","Data":"5da5a4c62946add4a1c25c1f724da2020ab10826cfef2c1262500b4cd3a24cb9"} Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.450337 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6b41a12f-8d19-4604-9105-8d833e25c268","Type":"ContainerDied","Data":"fe96681d14f358de3fae44a2b21b2678c97a92178a55412bf972784ae03457fa"} Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.450394 4689 scope.go:117] "RemoveContainer" containerID="d7eb457d141daaffcbc25cecd6691b58c561f23f0d41bf8a66bfbe7e221f9ae4" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.450425 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.477569 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" podStartSLOduration=2.015807648 podStartE2EDuration="2.477549883s" podCreationTimestamp="2026-01-23 11:18:55 +0000 UTC" firstStartedPulling="2026-01-23 11:18:56.412534906 +0000 UTC m=+1801.037214765" lastFinishedPulling="2026-01-23 11:18:56.874277151 +0000 UTC m=+1801.498957000" observedRunningTime="2026-01-23 11:18:57.466034445 +0000 UTC m=+1802.090714304" watchObservedRunningTime="2026-01-23 11:18:57.477549883 +0000 UTC m=+1802.102229742" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.479608 4689 scope.go:117] "RemoveContainer" containerID="b3eb9888537db550255e9740628446c590f1bf10711b85c6439a1820f8508b9a" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.503214 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.504460 4689 scope.go:117] "RemoveContainer" containerID="fb8f34e239b8e2e87141d2a0459740b0f68b78f77665e440b49dad16ef547bf6" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.522339 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.545261 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:57 crc kubenswrapper[4689]: E0123 11:18:57.546248 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-notifier" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546274 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-notifier" Jan 23 11:18:57 crc kubenswrapper[4689]: E0123 11:18:57.546323 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-listener" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546333 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-listener" Jan 23 11:18:57 crc kubenswrapper[4689]: E0123 11:18:57.546347 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-api" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546359 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-api" Jan 23 11:18:57 crc kubenswrapper[4689]: E0123 11:18:57.546393 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-evaluator" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546404 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-evaluator" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546500 4689 scope.go:117] "RemoveContainer" containerID="a6fa46dda54d4ed6635c14115c0b6b4f1a6c42f9b8add809b4c4c971dbf09770" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546734 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-notifier" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546772 4689 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-listener" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546782 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-api" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.546797 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" containerName="aodh-evaluator" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.560121 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.562802 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hvsrp" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.563171 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.563286 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.563395 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.563428 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.563454 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.656686 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b41a12f-8d19-4604-9105-8d833e25c268" path="/var/lib/kubelet/pods/6b41a12f-8d19-4604-9105-8d833e25c268/volumes" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685566 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685700 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-scripts\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685771 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-public-tls-certs\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685817 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jk2p\" (UniqueName: \"kubernetes.io/projected/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-kube-api-access-4jk2p\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685848 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-config-data\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.685923 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-internal-tls-certs\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788281 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-scripts\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788480 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-public-tls-certs\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788564 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jk2p\" (UniqueName: \"kubernetes.io/projected/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-kube-api-access-4jk2p\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788604 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-config-data\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788740 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-internal-tls-certs\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.788872 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.795785 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-public-tls-certs\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.795837 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.796456 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-internal-tls-certs\") pod \"aodh-0\" 
(UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.801616 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-scripts\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.805756 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-config-data\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.812601 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jk2p\" (UniqueName: \"kubernetes.io/projected/a74499e9-93c1-46dc-825a-3e9d7ec9adf3-kube-api-access-4jk2p\") pod \"aodh-0\" (UID: \"a74499e9-93c1-46dc-825a-3e9d7ec9adf3\") " pod="openstack/aodh-0" Jan 23 11:18:57 crc kubenswrapper[4689]: I0123 11:18:57.897297 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 23 11:18:58 crc kubenswrapper[4689]: W0123 11:18:58.506323 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda74499e9_93c1_46dc_825a_3e9d7ec9adf3.slice/crio-bd77fab2658cf1345805e87c09a7a51a673f7670ed89b072743a1bd819fc5c8e WatchSource:0}: Error finding container bd77fab2658cf1345805e87c09a7a51a673f7670ed89b072743a1bd819fc5c8e: Status 404 returned error can't find the container with id bd77fab2658cf1345805e87c09a7a51a673f7670ed89b072743a1bd819fc5c8e Jan 23 11:18:58 crc kubenswrapper[4689]: I0123 11:18:58.532349 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 23 11:18:59 crc kubenswrapper[4689]: I0123 11:18:59.482428 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a74499e9-93c1-46dc-825a-3e9d7ec9adf3","Type":"ContainerStarted","Data":"a5f691b05d546afe667f8421271051de598df3fb99d649a58bbfab76a06fc3c1"} Jan 23 11:18:59 crc kubenswrapper[4689]: I0123 11:18:59.482739 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a74499e9-93c1-46dc-825a-3e9d7ec9adf3","Type":"ContainerStarted","Data":"bd77fab2658cf1345805e87c09a7a51a673f7670ed89b072743a1bd819fc5c8e"} Jan 23 11:19:00 crc kubenswrapper[4689]: I0123 11:19:00.497419 4689 generic.go:334] "Generic (PLEG): container finished" podID="449fef30-676e-47a0-b1ea-8e5922146176" containerID="5da5a4c62946add4a1c25c1f724da2020ab10826cfef2c1262500b4cd3a24cb9" exitCode=0 Jan 23 11:19:00 crc kubenswrapper[4689]: I0123 11:19:00.497556 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" event={"ID":"449fef30-676e-47a0-b1ea-8e5922146176","Type":"ContainerDied","Data":"5da5a4c62946add4a1c25c1f724da2020ab10826cfef2c1262500b4cd3a24cb9"} Jan 23 11:19:00 crc kubenswrapper[4689]: I0123 11:19:00.500046 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a74499e9-93c1-46dc-825a-3e9d7ec9adf3","Type":"ContainerStarted","Data":"a87041d4ab9f86ab03d14faad7ec0aa4141a950404d5739ab62088c585a48504"} Jan 23 11:19:01 crc kubenswrapper[4689]: I0123 11:19:01.519057 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"a74499e9-93c1-46dc-825a-3e9d7ec9adf3","Type":"ContainerStarted","Data":"77ee70889e2345fc7cafc6be40cc62416355a454abeaef39c5287a546191bd96"} Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.439904 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.523233 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58gg8\" (UniqueName: \"kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8\") pod \"449fef30-676e-47a0-b1ea-8e5922146176\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.523382 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam\") pod \"449fef30-676e-47a0-b1ea-8e5922146176\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.523471 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory\") pod \"449fef30-676e-47a0-b1ea-8e5922146176\" (UID: \"449fef30-676e-47a0-b1ea-8e5922146176\") " Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.541396 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8" (OuterVolumeSpecName: "kube-api-access-58gg8") pod "449fef30-676e-47a0-b1ea-8e5922146176" (UID: "449fef30-676e-47a0-b1ea-8e5922146176"). InnerVolumeSpecName "kube-api-access-58gg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.591174 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.591211 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xspnm" event={"ID":"449fef30-676e-47a0-b1ea-8e5922146176","Type":"ContainerDied","Data":"631e27b1c1f9dfa27960258619a5f0e4de55f282262cbbbb0fdbed78d1764b30"} Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.591266 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="631e27b1c1f9dfa27960258619a5f0e4de55f282262cbbbb0fdbed78d1764b30" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.596260 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory" (OuterVolumeSpecName: "inventory") pod "449fef30-676e-47a0-b1ea-8e5922146176" (UID: "449fef30-676e-47a0-b1ea-8e5922146176"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.622391 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "449fef30-676e-47a0-b1ea-8e5922146176" (UID: "449fef30-676e-47a0-b1ea-8e5922146176"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.644538 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58gg8\" (UniqueName: \"kubernetes.io/projected/449fef30-676e-47a0-b1ea-8e5922146176-kube-api-access-58gg8\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.644580 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.644595 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/449fef30-676e-47a0-b1ea-8e5922146176-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.654023 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj"] Jan 23 11:19:02 crc kubenswrapper[4689]: E0123 11:19:02.654566 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="449fef30-676e-47a0-b1ea-8e5922146176" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.654583 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="449fef30-676e-47a0-b1ea-8e5922146176" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.654850 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="449fef30-676e-47a0-b1ea-8e5922146176" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.655765 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.667833 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj"] Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.749248 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbwc9\" (UniqueName: \"kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.749413 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.749494 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.749634 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.851270 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbwc9\" (UniqueName: \"kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.851396 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.851448 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.851535 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.855111 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.855301 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.856635 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.875087 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbwc9\" (UniqueName: \"kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:02 crc kubenswrapper[4689]: I0123 11:19:02.992524 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" Jan 23 11:19:03 crc kubenswrapper[4689]: W0123 11:19:03.586218 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16110fd6_6d8f_4901_8f68_b155d2a27236.slice/crio-8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f WatchSource:0}: Error finding container 8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f: Status 404 returned error can't find the container with id 8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f Jan 23 11:19:03 crc kubenswrapper[4689]: I0123 11:19:03.605057 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj"] Jan 23 11:19:03 crc kubenswrapper[4689]: I0123 11:19:03.660002 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a74499e9-93c1-46dc-825a-3e9d7ec9adf3","Type":"ContainerStarted","Data":"7964a0f8a92441dcbcd98960dfec8bfb4b6a7892f67074193d1b0574f2800cb9"} Jan 23 11:19:03 crc kubenswrapper[4689]: I0123 11:19:03.660072 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" event={"ID":"16110fd6-6d8f-4901-8f68-b155d2a27236","Type":"ContainerStarted","Data":"8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f"} Jan 23 11:19:03 crc kubenswrapper[4689]: I0123 11:19:03.686694 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.9113960370000003 podStartE2EDuration="6.686673527s" podCreationTimestamp="2026-01-23 11:18:57 +0000 UTC" firstStartedPulling="2026-01-23 11:18:58.513068735 +0000 UTC m=+1803.137748594" lastFinishedPulling="2026-01-23 11:19:02.288346215 +0000 UTC m=+1806.913026084" observedRunningTime="2026-01-23 11:19:03.666974726 +0000 UTC m=+1808.291654585" watchObservedRunningTime="2026-01-23 11:19:03.686673527 +0000 UTC m=+1808.311353386" Jan 23 11:19:04 crc kubenswrapper[4689]: I0123 11:19:04.666673 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" event={"ID":"16110fd6-6d8f-4901-8f68-b155d2a27236","Type":"ContainerStarted","Data":"e7971324e3c57cba335266b568c582da6497a8e5eeaeb6afdaae5e501f993e7a"} Jan 23 11:19:04 crc kubenswrapper[4689]: I0123 11:19:04.696077 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" podStartSLOduration=2.008037364 podStartE2EDuration="2.696049718s" podCreationTimestamp="2026-01-23 11:19:02 +0000 UTC" firstStartedPulling="2026-01-23 11:19:03.590040709 +0000 UTC m=+1808.214720568" lastFinishedPulling="2026-01-23 11:19:04.278053063 +0000 UTC m=+1808.902732922" observedRunningTime="2026-01-23 11:19:04.683222519 +0000 UTC m=+1809.307902398" watchObservedRunningTime="2026-01-23 11:19:04.696049718 +0000 UTC m=+1809.320729577" Jan 23 11:19:06 crc kubenswrapper[4689]: I0123 11:19:06.640811 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e" Jan 23 11:19:07 crc kubenswrapper[4689]: I0123 11:19:07.703047 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" 
event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9"} Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.519544 4689 scope.go:117] "RemoveContainer" containerID="bd0a6af6f2e151ed17842795c8ec421f2fae0323ac1c355fed9f395a8bcadeec" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.558381 4689 scope.go:117] "RemoveContainer" containerID="8c0cd05ab8e4510c97cce55bbfd65475091d68cd4699957d837e72940f27a119" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.587201 4689 scope.go:117] "RemoveContainer" containerID="b0fd81ce1f3137d9fefec80dd7232b4881694ab03f9745191007e1ff8c81b81a" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.611349 4689 scope.go:117] "RemoveContainer" containerID="97f054985c008fc2d05deea0e28fe31a4763fd73a3c97a4ca0d6b90c1bc747f1" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.639026 4689 scope.go:117] "RemoveContainer" containerID="0d8088a4de1cdda262a8e859efdc1e86d7af2369f57fc39ec6473db28b41ab97" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.733805 4689 scope.go:117] "RemoveContainer" containerID="1dc3e6fb8619da30ce4d89119e5aa084639aded52ce528bd51582a0eb45a0995" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.766768 4689 scope.go:117] "RemoveContainer" containerID="8f753b39ca88fe7113e07b4b87bed62e256ed39551e9b961889598c74c97e508" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.824235 4689 scope.go:117] "RemoveContainer" containerID="fd64a86e7b1aed59e47a099b6fb57f58163ce37c5d30c306825ce9a4534b16f2" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.878792 4689 scope.go:117] "RemoveContainer" containerID="c7c30db92bdc875cce4aefa0694b1272c32306535a4d8a68b4bc32de3bcb92c5" Jan 23 11:19:15 crc kubenswrapper[4689]: I0123 11:19:15.905014 4689 scope.go:117] "RemoveContainer" containerID="1ba8e049bf2430cda7670a46927a921dc626316e0c06256c239ae8673054f611" Jan 23 11:19:20 crc kubenswrapper[4689]: I0123 11:19:20.895544 4689 generic.go:334] "Generic (PLEG): container finished" podID="06df4649-6b5c-4a19-be98-7603002120de" containerID="15295d66927ff9362771ed0968025542f9cb60ed8136bb88c006e421195a24dc" exitCode=0 Jan 23 11:19:20 crc kubenswrapper[4689]: I0123 11:19:20.895629 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"06df4649-6b5c-4a19-be98-7603002120de","Type":"ContainerDied","Data":"15295d66927ff9362771ed0968025542f9cb60ed8136bb88c006e421195a24dc"} Jan 23 11:19:21 crc kubenswrapper[4689]: I0123 11:19:21.908826 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"06df4649-6b5c-4a19-be98-7603002120de","Type":"ContainerStarted","Data":"9295aabc7fb1c78091ea6043a9e7b44a952ebc019c02af6ec73c6c04389cc3eb"} Jan 23 11:19:21 crc kubenswrapper[4689]: I0123 11:19:21.909669 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 23 11:19:21 crc kubenswrapper[4689]: I0123 11:19:21.931861 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=37.931837047 podStartE2EDuration="37.931837047s" podCreationTimestamp="2026-01-23 11:18:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:19:21.929287344 +0000 UTC m=+1826.553967203" watchObservedRunningTime="2026-01-23 11:19:21.931837047 +0000 UTC m=+1826.556516906" Jan 23 11:19:35 crc 
Jan 23 11:19:35 crc kubenswrapper[4689]: I0123 11:19:35.016360 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1"
Jan 23 11:19:35 crc kubenswrapper[4689]: I0123 11:19:35.066308 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 11:19:39 crc kubenswrapper[4689]: I0123 11:19:39.894395 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" containerID="cri-o://12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932" gracePeriod=604796
Jan 23 11:19:40 crc kubenswrapper[4689]: I0123 11:19:40.918693 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.127:5671: connect: connection refused"
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.682506 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.789912 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.790023 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-erlang-cookie\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.790062 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.790224 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.790264 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.790350 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") "
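In the kill record above, gracePeriod=604796 appears to be the grace remaining at the instant the kill was issued rather than a configured value: the API DELETE for rabbitmq-server-0 landed at 11:19:35.066 and the CRI-O kill went out at 11:19:39.894, so a few seconds had already been consumed. Assuming the pod was created with terminationGracePeriodSeconds=604800, the 7-day default the RabbitMQ cluster operator customarily applies (the pod spec itself is not visible in this log), the numbers line up after sub-second rounding:

    from datetime import timedelta

    configured = 604800  # assumed terminationGracePeriodSeconds (7 days); not shown in this log
    remaining = 604796   # gracePeriod=... from the kill record above
    print(timedelta(seconds=configured))  # 7 days, 0:00:00
    print(configured - remaining)         # 4, consistent with the ~4.8s DELETE-to-kill gap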
"8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.791282 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.791404 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.791508 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.791589 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxnd9\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.791641 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info\") pod \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\" (UID: \"8287b3f6-975a-4082-a086-bd1ee9ec4d7b\") " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.792352 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.792721 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.792976 4689 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.792997 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.793008 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.800465 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.808293 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9" (OuterVolumeSpecName: "kube-api-access-fxnd9") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "kube-api-access-fxnd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.808738 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info" (OuterVolumeSpecName: "pod-info") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.815021 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.836939 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data" (OuterVolumeSpecName: "config-data") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.840775 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69" (OuterVolumeSpecName: "persistence") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.862716 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf" (OuterVolumeSpecName: "server-conf") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896282 4689 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896337 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") on node \"crc\" " Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896352 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxnd9\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-kube-api-access-fxnd9\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896361 4689 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896369 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896378 4689 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.896386 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.946446 4689 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.946607 4689 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69") on node "crc" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.974204 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "8287b3f6-975a-4082-a086-bd1ee9ec4d7b" (UID: "8287b3f6-975a-4082-a086-bd1ee9ec4d7b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.998541 4689 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8287b3f6-975a-4082-a086-bd1ee9ec4d7b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:46 crc kubenswrapper[4689]: I0123 11:19:46.998762 4689 reconciler_common.go:293] "Volume detached for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") on node \"crc\" DevicePath \"\"" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.182981 4689 generic.go:334] "Generic (PLEG): container finished" podID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerID="12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932" exitCode=0 Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.183041 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.183058 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerDied","Data":"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932"} Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.183394 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8287b3f6-975a-4082-a086-bd1ee9ec4d7b","Type":"ContainerDied","Data":"4fe1935ab432ef302a11cce2f11c0d4be10aef8b0eac81ab75516a5a9c81e7e8"} Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.183411 4689 scope.go:117] "RemoveContainer" containerID="12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.259612 4689 scope.go:117] "RemoveContainer" containerID="7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.300288 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.311430 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.312092 4689 scope.go:117] "RemoveContainer" containerID="12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932" Jan 23 11:19:47 crc kubenswrapper[4689]: E0123 11:19:47.312688 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932\": container with ID starting with 12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932 not found: ID does not exist" containerID="12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.312767 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932"} err="failed to get container status \"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932\": rpc error: code = NotFound desc = could not find container \"12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932\": container with ID starting with 12faed1faf16aadbfa7833cfd2d58ba99d8f2e887ec32a1477fe6e0a4410a932 not found: 
ID does not exist" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.312795 4689 scope.go:117] "RemoveContainer" containerID="7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770" Jan 23 11:19:47 crc kubenswrapper[4689]: E0123 11:19:47.313128 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770\": container with ID starting with 7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770 not found: ID does not exist" containerID="7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.313247 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770"} err="failed to get container status \"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770\": rpc error: code = NotFound desc = could not find container \"7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770\": container with ID starting with 7a3a0a3b02c8b92aaa27372f09a184cdb45bd6a86fd293ac0f521dd66b646770 not found: ID does not exist" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.334329 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:19:47 crc kubenswrapper[4689]: E0123 11:19:47.334977 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.335004 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" Jan 23 11:19:47 crc kubenswrapper[4689]: E0123 11:19:47.335048 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="setup-container" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.335057 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="setup-container" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.335378 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" containerName="rabbitmq" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.337024 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.350022 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411488 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/94aeeba2-67ab-417d-8a04-a8f22353294f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411531 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/94aeeba2-67ab-417d-8a04-a8f22353294f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411561 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411652 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411681 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-config-data\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411717 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411782 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbzpm\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-kube-api-access-gbzpm\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411810 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411901 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411917 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.411938 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.514408 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.514530 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbzpm\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-kube-api-access-gbzpm\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.514571 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.514710 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.514769 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515358 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0" Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515424 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod 
\"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515476 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/94aeeba2-67ab-417d-8a04-a8f22353294f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515499 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/94aeeba2-67ab-417d-8a04-a8f22353294f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515550 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515690 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515703 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.515799 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-config-data\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.516974 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-config-data\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.517128 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.517783 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/94aeeba2-67ab-417d-8a04-a8f22353294f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.519017 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.519056 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/94aeeba2-67ab-417d-8a04-a8f22353294f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.519365 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.520935 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/94aeeba2-67ab-417d-8a04-a8f22353294f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.522068 4689 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.522243 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e3bfba15c2620bda99933c0d1fcd02c9f7555e6a83c90ae95be6723cb9e7b56d/globalmount\"" pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.535214 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbzpm\" (UniqueName: \"kubernetes.io/projected/94aeeba2-67ab-417d-8a04-a8f22353294f-kube-api-access-gbzpm\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.592116 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-83ec4221-1dbd-4502-87ad-af9bad7a9f69\") pod \"rabbitmq-server-0\" (UID: \"94aeeba2-67ab-417d-8a04-a8f22353294f\") " pod="openstack/rabbitmq-server-0"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.652248 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8287b3f6-975a-4082-a086-bd1ee9ec4d7b" path="/var/lib/kubelet/pods/8287b3f6-975a-4082-a086-bd1ee9ec4d7b/volumes"
Jan 23 11:19:47 crc kubenswrapper[4689]: I0123 11:19:47.668053 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 23 11:19:48 crc kubenswrapper[4689]: I0123 11:19:48.187905 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 11:19:48 crc kubenswrapper[4689]: W0123 11:19:48.191407 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94aeeba2_67ab_417d_8a04_a8f22353294f.slice/crio-ccf91a85ecb6d0cd22f55b57827fd28e7320dff4e7c6d240c6d074fefb8b23c1 WatchSource:0}: Error finding container ccf91a85ecb6d0cd22f55b57827fd28e7320dff4e7c6d240c6d074fefb8b23c1: Status 404 returned error can't find the container with id ccf91a85ecb6d0cd22f55b57827fd28e7320dff4e7c6d240c6d074fefb8b23c1
Jan 23 11:19:49 crc kubenswrapper[4689]: I0123 11:19:49.215848 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"94aeeba2-67ab-417d-8a04-a8f22353294f","Type":"ContainerStarted","Data":"ccf91a85ecb6d0cd22f55b57827fd28e7320dff4e7c6d240c6d074fefb8b23c1"}
Jan 23 11:19:50 crc kubenswrapper[4689]: I0123 11:19:50.229472 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"94aeeba2-67ab-417d-8a04-a8f22353294f","Type":"ContainerStarted","Data":"993d5ce680eac7216c389e3d5300365e4ee6457a4eb47f6de6ed447f672ac87e"}
Jan 23 11:20:23 crc kubenswrapper[4689]: I0123 11:20:23.653048 4689 generic.go:334] "Generic (PLEG): container finished" podID="94aeeba2-67ab-417d-8a04-a8f22353294f" containerID="993d5ce680eac7216c389e3d5300365e4ee6457a4eb47f6de6ed447f672ac87e" exitCode=0
Jan 23 11:20:23 crc kubenswrapper[4689]: I0123 11:20:23.666893 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"94aeeba2-67ab-417d-8a04-a8f22353294f","Type":"ContainerDied","Data":"993d5ce680eac7216c389e3d5300365e4ee6457a4eb47f6de6ed447f672ac87e"}
Jan 23 11:20:24 crc kubenswrapper[4689]: I0123 11:20:24.669735 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"94aeeba2-67ab-417d-8a04-a8f22353294f","Type":"ContainerStarted","Data":"561af6afc77c3736b7d4df48e90a11e31e22a3a170448c19545ceb9017d3c503"}
Jan 23 11:20:24 crc kubenswrapper[4689]: I0123 11:20:24.671503 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 23 11:20:24 crc kubenswrapper[4689]: I0123 11:20:24.701602 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.701583082 podStartE2EDuration="37.701583082s" podCreationTimestamp="2026-01-23 11:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:20:24.690628489 +0000 UTC m=+1889.315308368" watchObservedRunningTime="2026-01-23 11:20:24.701583082 +0000 UTC m=+1889.326262941"
Jan 23 11:20:37 crc kubenswrapper[4689]: I0123 11:20:37.672482 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 23 11:21:14 crc kubenswrapper[4689]: I0123 11:21:14.055064 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5x5bf"]
Jan 23 11:21:14 crc kubenswrapper[4689]: I0123 11:21:14.071119 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-16b3-account-create-update-m8m5d"]
Jan 23 11:21:14 crc kubenswrapper[4689]: I0123 11:21:14.082618 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-16b3-account-create-update-m8m5d"]
Jan 23 11:21:14 crc kubenswrapper[4689]: I0123 11:21:14.093650 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5x5bf"]
Jan 23 11:21:15 crc kubenswrapper[4689]: I0123 11:21:15.653742 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85404a8c-170b-4e87-9f31-c584c1cd479d" path="/var/lib/kubelet/pods/85404a8c-170b-4e87-9f31-c584c1cd479d/volumes"
Jan 23 11:21:15 crc kubenswrapper[4689]: I0123 11:21:15.654989 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4c15870-4970-4b10-b1fb-ecd61c9d341b" path="/var/lib/kubelet/pods/a4c15870-4970-4b10-b1fb-ecd61c9d341b/volumes"
Jan 23 11:21:16 crc kubenswrapper[4689]: I0123 11:21:16.259661 4689 scope.go:117] "RemoveContainer" containerID="a1b5137c2ed5bba3732515cce96013d48c40bcbec246de59219540e533ecfe0e"
Jan 23 11:21:16 crc kubenswrapper[4689]: I0123 11:21:16.312422 4689 scope.go:117] "RemoveContainer" containerID="8fb4ee08b8cded89de9a5e23c95558ff0f7464a213648efd6be67612ba917de0"
Jan 23 11:21:16 crc kubenswrapper[4689]: I0123 11:21:16.374044 4689 scope.go:117] "RemoveContainer" containerID="ec07cf8555d0a386dbe82e4c2dfbb4baa36cc85c76c9efff248e2a93512d1b9f"
Jan 23 11:21:17 crc kubenswrapper[4689]: I0123 11:21:17.047121 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-drbfg"]
Jan 23 11:21:17 crc kubenswrapper[4689]: I0123 11:21:17.062245 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-drbfg"]
Jan 23 11:21:17 crc kubenswrapper[4689]: I0123 11:21:17.653113 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="919c9b5f-f82f-45c9-a6df-7193a6a7e6f0" path="/var/lib/kubelet/pods/919c9b5f-f82f-45c9-a6df-7193a6a7e6f0/volumes"
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.073814 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-05a3-account-create-update-8sl6r"]
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.085496 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6c5d-account-create-update-djhds"]
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.097428 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-05a3-account-create-update-8sl6r"]
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.110755 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-hzwkd"]
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.122928 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6c5d-account-create-update-djhds"]
Jan 23 11:21:18 crc kubenswrapper[4689]: I0123 11:21:18.134800 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-hzwkd"]
Jan 23 11:21:19 crc kubenswrapper[4689]: I0123 11:21:19.654025 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a9df33f-1781-47c3-b4dc-0f21bdf88a05" path="/var/lib/kubelet/pods/7a9df33f-1781-47c3-b4dc-0f21bdf88a05/volumes"
Jan 23 11:21:19 crc kubenswrapper[4689]: I0123 11:21:19.654869 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e16c499-f349-4cd0-bbe9-85524b31ba67" path="/var/lib/kubelet/pods/7e16c499-f349-4cd0-bbe9-85524b31ba67/volumes"
Jan 23 11:21:19 crc kubenswrapper[4689]: I0123 11:21:19.655498 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3d7001f-8caf-4867-bfec-1e60eaeefad8" path="/var/lib/kubelet/pods/f3d7001f-8caf-4867-bfec-1e60eaeefad8/volumes"
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.035828 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-nxlln"]
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.048211 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-2f06-account-create-update-7hlnh"]
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.060827 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-2f06-account-create-update-7hlnh"]
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.072810 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-nxlln"]
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.657037 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02b22380-5105-4302-a086-e75dd58e63e5" path="/var/lib/kubelet/pods/02b22380-5105-4302-a086-e75dd58e63e5/volumes"
Jan 23 11:21:21 crc kubenswrapper[4689]: I0123 11:21:21.658132 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7167d100-f446-4745-b762-16d355cf4b9c" path="/var/lib/kubelet/pods/7167d100-f446-4745-b762-16d355cf4b9c/volumes"
Jan 23 11:21:26 crc kubenswrapper[4689]: I0123 11:21:26.043214 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-vj2d8"]
Jan 23 11:21:26 crc kubenswrapper[4689]: I0123 11:21:26.060420 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-vj2d8"]
Jan 23 11:21:27 crc kubenswrapper[4689]: I0123 11:21:27.654058 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52a6d4e-b28d-49de-ba97-65d3e33e9d56" path="/var/lib/kubelet/pods/c52a6d4e-b28d-49de-ba97-65d3e33e9d56/volumes"
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.049341 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-lh96s"]
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.062186 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-8e94-account-create-update-5f7ck"]
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.074810 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-lh96s"]
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.087103 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-8e94-account-create-update-5f7ck"]
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.310985 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.311041 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.652821 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3602ba50-13ae-492b-855d-9b7f51c6d398" path="/var/lib/kubelet/pods/3602ba50-13ae-492b-855d-9b7f51c6d398/volumes"
Jan 23 11:21:33 crc kubenswrapper[4689]: I0123 11:21:33.653876 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e72bd573-8bb9-43e3-8bce-26701d118894" path="/var/lib/kubelet/pods/e72bd573-8bb9-43e3-8bce-26701d118894/volumes"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.004695 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.007502 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.025877 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.077888 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.078365 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.078640 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4gmq\" (UniqueName: \"kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.181390 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4gmq\" (UniqueName: \"kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.181495 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.181673 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.182619 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.182699 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.217325 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4gmq\" (UniqueName: \"kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq\") pod \"redhat-marketplace-cwdd2\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") " pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:40 crc kubenswrapper[4689]: I0123 11:21:40.325914 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:41 crc kubenswrapper[4689]: I0123 11:21:41.039900 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:41 crc kubenswrapper[4689]: I0123 11:21:41.688547 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerID="1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b" exitCode=0
Jan 23 11:21:41 crc kubenswrapper[4689]: I0123 11:21:41.688822 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerDied","Data":"1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b"}
Jan 23 11:21:41 crc kubenswrapper[4689]: I0123 11:21:41.688852 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerStarted","Data":"2833836146a8fb1b1b6360b1ab290c701adc358504ab8f1980be50f43a473c05"}
Jan 23 11:21:41 crc kubenswrapper[4689]: I0123 11:21:41.690866 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 11:21:42 crc kubenswrapper[4689]: I0123 11:21:42.700319 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerStarted","Data":"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"}
Jan 23 11:21:44 crc kubenswrapper[4689]: I0123 11:21:44.723578 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerID="80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe" exitCode=0
Jan 23 11:21:44 crc kubenswrapper[4689]: I0123 11:21:44.723663 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerDied","Data":"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"}
Jan 23 11:21:45 crc kubenswrapper[4689]: I0123 11:21:45.736836 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerStarted","Data":"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"}
Jan 23 11:21:45 crc kubenswrapper[4689]: I0123 11:21:45.755216 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cwdd2" podStartSLOduration=3.266240916 podStartE2EDuration="6.755194982s" podCreationTimestamp="2026-01-23 11:21:39 +0000 UTC" firstStartedPulling="2026-01-23 11:21:41.690671802 +0000 UTC m=+1966.315351661" lastFinishedPulling="2026-01-23 11:21:45.179625868 +0000 UTC m=+1969.804305727" observedRunningTime="2026-01-23 11:21:45.754200277 +0000 UTC m=+1970.378880156" watchObservedRunningTime="2026-01-23 11:21:45.755194982 +0000 UTC m=+1970.379874841"
Jan 23 11:21:46 crc kubenswrapper[4689]: I0123 11:21:46.036257 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8a74-account-create-update-fzj69"]
Jan 23 11:21:46 crc kubenswrapper[4689]: I0123 11:21:46.048337 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8a74-account-create-update-fzj69"]
Jan 23 11:21:46 crc kubenswrapper[4689]: I0123 11:21:46.063894 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-7e2d-account-create-update-t5thb"]
Jan 23 11:21:46 crc kubenswrapper[4689]: I0123 11:21:46.078946 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-7e2d-account-create-update-t5thb"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.038791 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-fd90-account-create-update-mb96s"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.053564 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-rh66n"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.065790 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-fd90-account-create-update-mb96s"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.078007 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-rh66n"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.091889 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-4gf6b"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.104346 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-4gf6b"]
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.653784 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18e705d3-b57f-46e9-9c58-4a889bb49638" path="/var/lib/kubelet/pods/18e705d3-b57f-46e9-9c58-4a889bb49638/volumes"
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.654453 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a3222e1-02e3-44e3-ad01-ae2f033ceac3" path="/var/lib/kubelet/pods/2a3222e1-02e3-44e3-ad01-ae2f033ceac3/volumes"
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.655074 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dcba22c-2c98-4d8a-91c6-ce572778f1cb" path="/var/lib/kubelet/pods/3dcba22c-2c98-4d8a-91c6-ce572778f1cb/volumes"
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.655726 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac9258d-ea9b-4018-bb50-2767a4aabfd2" path="/var/lib/kubelet/pods/5ac9258d-ea9b-4018-bb50-2767a4aabfd2/volumes"
Jan 23 11:21:47 crc kubenswrapper[4689]: I0123 11:21:47.657175 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98a0462a-471a-4ed9-90a6-0e185f70b2bb" path="/var/lib/kubelet/pods/98a0462a-471a-4ed9-90a6-0e185f70b2bb/volumes"
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.037687 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-fklps"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.048844 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-r9t2h"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.060246 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e9e9-account-create-update-5zvgd"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.071725 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-fklps"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.082092 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-r9t2h"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.091304 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e9e9-account-create-update-5zvgd"]
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.327853 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.327894 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.413921 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.846382 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:50 crc kubenswrapper[4689]: I0123 11:21:50.906554 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:51 crc kubenswrapper[4689]: I0123 11:21:51.656761 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1744ca79-20e6-40d0-8d71-699e88c7013d" path="/var/lib/kubelet/pods/1744ca79-20e6-40d0-8d71-699e88c7013d/volumes"
Jan 23 11:21:51 crc kubenswrapper[4689]: I0123 11:21:51.657953 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc3173d9-5b9e-47fc-a5aa-3a260297c997" path="/var/lib/kubelet/pods/cc3173d9-5b9e-47fc-a5aa-3a260297c997/volumes"
Jan 23 11:21:51 crc kubenswrapper[4689]: I0123 11:21:51.658883 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db4a1b6f-833b-4ef8-a55b-707d4a135fdb" path="/var/lib/kubelet/pods/db4a1b6f-833b-4ef8-a55b-707d4a135fdb/volumes"
Jan 23 11:21:52 crc kubenswrapper[4689]: I0123 11:21:52.822124 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cwdd2" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="registry-server" containerID="cri-o://3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0" gracePeriod=2
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.385623 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.445699 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities\") pod \"0cbab070-713b-40d3-8f54-2867075c3c0d\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") "
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.445992 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content\") pod \"0cbab070-713b-40d3-8f54-2867075c3c0d\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") "
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.446060 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4gmq\" (UniqueName: \"kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq\") pod \"0cbab070-713b-40d3-8f54-2867075c3c0d\" (UID: \"0cbab070-713b-40d3-8f54-2867075c3c0d\") "
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.446710 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities" (OuterVolumeSpecName: "utilities") pod "0cbab070-713b-40d3-8f54-2867075c3c0d" (UID: "0cbab070-713b-40d3-8f54-2867075c3c0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.459998 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq" (OuterVolumeSpecName: "kube-api-access-x4gmq") pod "0cbab070-713b-40d3-8f54-2867075c3c0d" (UID: "0cbab070-713b-40d3-8f54-2867075c3c0d"). InnerVolumeSpecName "kube-api-access-x4gmq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.471383 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0cbab070-713b-40d3-8f54-2867075c3c0d" (UID: "0cbab070-713b-40d3-8f54-2867075c3c0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.549126 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.549500 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4gmq\" (UniqueName: \"kubernetes.io/projected/0cbab070-713b-40d3-8f54-2867075c3c0d-kube-api-access-x4gmq\") on node \"crc\" DevicePath \"\""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.549520 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cbab070-713b-40d3-8f54-2867075c3c0d-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.835010 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerID="3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0" exitCode=0
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.835052 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerDied","Data":"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"}
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.835087 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cwdd2" event={"ID":"0cbab070-713b-40d3-8f54-2867075c3c0d","Type":"ContainerDied","Data":"2833836146a8fb1b1b6360b1ab290c701adc358504ab8f1980be50f43a473c05"}
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.835121 4689 scope.go:117] "RemoveContainer" containerID="3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.835190 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cwdd2"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.862173 4689 scope.go:117] "RemoveContainer" containerID="80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.864786 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.878892 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cwdd2"]
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.886700 4689 scope.go:117] "RemoveContainer" containerID="1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.949250 4689 scope.go:117] "RemoveContainer" containerID="3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"
Jan 23 11:21:53 crc kubenswrapper[4689]: E0123 11:21:53.949903 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0\": container with ID starting with 3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0 not found: ID does not exist" containerID="3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.949958 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0"} err="failed to get container status \"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0\": rpc error: code = NotFound desc = could not find container \"3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0\": container with ID starting with 3b8ca9e6d827513a632794ceefe47e4adb9f8afd9323f4e44433e1e6d977fac0 not found: ID does not exist"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.949995 4689 scope.go:117] "RemoveContainer" containerID="80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"
Jan 23 11:21:53 crc kubenswrapper[4689]: E0123 11:21:53.950294 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe\": container with ID starting with 80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe not found: ID does not exist" containerID="80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.950337 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe"} err="failed to get container status \"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe\": rpc error: code = NotFound desc = could not find container \"80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe\": container with ID starting with 80cccaa63e2b463ba4c4b69c6d774bd954678add5da629d682bf8b78613c9cfe not found: ID does not exist"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.950363 4689 scope.go:117] "RemoveContainer" containerID="1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b"
Jan 23 11:21:53 crc kubenswrapper[4689]: E0123 11:21:53.950590 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b\": container with ID starting with 1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b not found: ID does not exist" containerID="1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b"
Jan 23 11:21:53 crc kubenswrapper[4689]: I0123 11:21:53.950632 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b"} err="failed to get container status \"1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b\": rpc error: code = NotFound desc = could not find container \"1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b\": container with ID starting with 1cdd75d1002a57cbb0bf437c17d02b6a17986bffb17926a5a6320149733aa81b not found: ID does not exist"
Jan 23 11:21:55 crc kubenswrapper[4689]: I0123 11:21:55.037203 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-68w28"]
Jan 23 11:21:55 crc kubenswrapper[4689]: I0123 11:21:55.049830 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-68w28"]
Jan 23 11:21:55 crc kubenswrapper[4689]: I0123 11:21:55.675812 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" path="/var/lib/kubelet/pods/0cbab070-713b-40d3-8f54-2867075c3c0d/volumes"
Jan 23 11:21:55 crc kubenswrapper[4689]: I0123 11:21:55.676877 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb7b2de7-60f5-4357-a6ec-21c925346c7f" path="/var/lib/kubelet/pods/eb7b2de7-60f5-4357-a6ec-21c925346c7f/volumes"
Jan 23 11:22:03 crc kubenswrapper[4689]: I0123 11:22:03.311053 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:22:03 crc kubenswrapper[4689]: I0123 11:22:03.312277 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.530163 4689 scope.go:117] "RemoveContainer" containerID="ee8739c24f5a4e378d2976ff5191d8d9ed0806cda37799d60bc6057e6dd6029f"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.571958 4689 scope.go:117] "RemoveContainer" containerID="415abc379391f653942dc6239ef70369f0f0988cf5396b2f475091696322fbe7"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.652287 4689 scope.go:117] "RemoveContainer" containerID="3d49a105448f011c8629836c87cae15ed2c23eb6deb83b05e8fcfbe31650f4b8"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.711724 4689 scope.go:117] "RemoveContainer" containerID="a186d3e2c17ebd6163efc399489f67b9a8a9288c88753bf502bc3ac17fe4533d"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.769474 4689 scope.go:117] "RemoveContainer" containerID="1e7896a7475e31eb878cbeb40db4aae43db0d6a1b87f69e291ec1cf33c206a5a"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.826084 4689 scope.go:117] "RemoveContainer" containerID="efe38824785111121aa433fb517c756f0a9a150e2cdcef8d1d38970f3f22e666"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.877073 4689 scope.go:117] "RemoveContainer" containerID="cf98ae34220f17583994c83ab86552defca6675eb410b1464b4b6628a926baf7"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.916186 4689 scope.go:117] "RemoveContainer" containerID="d33a448b5a3cd50e82d1d994a8b9ff6284ec3b7305b426f184baec61300dab1d"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.942118 4689 scope.go:117] "RemoveContainer" containerID="6b089c8894446ce359c7f89f4395e5a23240a01be629445bef9d66d305ac36e5"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.973213 4689 scope.go:117] "RemoveContainer" containerID="5df8b086d4acc5fc2d31e8db12fc7195ff53936f2e6047c9d9e81ff71ee2bf4a"
Jan 23 11:22:16 crc kubenswrapper[4689]: I0123 11:22:16.993597 4689 scope.go:117] "RemoveContainer" containerID="3481f853370372acd70a380420bb978c71911e6cd833279f2a67bb92a5b07161"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.029080 4689 scope.go:117] "RemoveContainer" containerID="e44c9b081b1be96d0028ac482eca3ad1922b272a7f17eae9d02f031ea67d8f10"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.067075 4689 scope.go:117] "RemoveContainer" containerID="8e918eb8aa6d378156b476e9476ed22b176b9ed2e62eee31b7b5f1c935fcbf53"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.093940 4689 scope.go:117] "RemoveContainer" containerID="b0e0a3dbfc36e0051e797e03a7d1af5a0b8c3d4b3fa9a5b6dfd3aad62fc577e5"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.115985 4689 scope.go:117] "RemoveContainer" containerID="a7a4725a9303f3380d60dd5c15a72dd74df6b104839ec08a02211bc9813fe5ad"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.143732 4689 scope.go:117] "RemoveContainer" containerID="2d1d5315ba6ae205ba4ee47ad522498e36b6fe3d8d4cdb35bc502bc993f4e683"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.174499 4689 scope.go:117] "RemoveContainer" containerID="4313a23814c78f447f1b1cb9da4a0d48eb7f485c0ee51748ef4fa98de1a87454"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.202920 4689 scope.go:117] "RemoveContainer" containerID="2c3887635fcf01b1eb4460e1770da411159c3add513b47bd4a3222c0fa782da9"
Jan 23 11:22:17 crc kubenswrapper[4689]: I0123 11:22:17.228513 4689 scope.go:117] "RemoveContainer" containerID="315bac6a83cba198f9e00863f10cca377df580366bb3193f55b4f2da49e4ca4f"
Jan 23 11:22:26 crc kubenswrapper[4689]: I0123 11:22:26.046279 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-5tlkt"]
Jan 23 11:22:26 crc kubenswrapper[4689]: I0123 11:22:26.058502 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-5tlkt"]
Jan 23 11:22:27 crc kubenswrapper[4689]: I0123 11:22:27.034857 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-b6958"]
Jan 23 11:22:27 crc kubenswrapper[4689]: I0123 11:22:27.050927 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-b6958"]
Jan 23 11:22:27 crc kubenswrapper[4689]: I0123 11:22:27.655325 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30a7d451-e70f-43cd-ae81-e5ccbdcb53f3" path="/var/lib/kubelet/pods/30a7d451-e70f-43cd-ae81-e5ccbdcb53f3/volumes"
Jan 23 11:22:27 crc kubenswrapper[4689]: I0123 11:22:27.656969 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1455952-eacb-400c-bb65-d9d6ca95a674" path="/var/lib/kubelet/pods/c1455952-eacb-400c-bb65-d9d6ca95a674/volumes"
Jan 23 11:22:31 crc kubenswrapper[4689]: I0123 11:22:31.075280 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-hm599"]
Jan 23 11:22:31 crc kubenswrapper[4689]: I0123 11:22:31.100377 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-hm599"]
Jan 23 11:22:31 crc kubenswrapper[4689]: I0123 11:22:31.654436 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5e5ddbf-b676-44a1-996e-a6aafe2280e5" path="/var/lib/kubelet/pods/a5e5ddbf-b676-44a1-996e-a6aafe2280e5/volumes"
Jan 23 11:22:31 crc kubenswrapper[4689]: E0123 11:22:31.914807 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16110fd6_6d8f_4901_8f68_b155d2a27236.slice/crio-conmon-e7971324e3c57cba335266b568c582da6497a8e5eeaeb6afdaae5e501f993e7a.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 11:22:32 crc kubenswrapper[4689]: I0123 11:22:32.306978 4689 generic.go:334] "Generic (PLEG): container finished" podID="16110fd6-6d8f-4901-8f68-b155d2a27236" containerID="e7971324e3c57cba335266b568c582da6497a8e5eeaeb6afdaae5e501f993e7a" exitCode=0
Jan 23 11:22:32 crc kubenswrapper[4689]: I0123 11:22:32.307030 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" event={"ID":"16110fd6-6d8f-4901-8f68-b155d2a27236","Type":"ContainerDied","Data":"e7971324e3c57cba335266b568c582da6497a8e5eeaeb6afdaae5e501f993e7a"}
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.311434 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.311787 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.312282 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf"
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.313628 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.313753 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9" gracePeriod=600
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.837833 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj"
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.893043 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbwc9\" (UniqueName: \"kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9\") pod \"16110fd6-6d8f-4901-8f68-b155d2a27236\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") "
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.893172 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam\") pod \"16110fd6-6d8f-4901-8f68-b155d2a27236\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") "
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.893328 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory\") pod \"16110fd6-6d8f-4901-8f68-b155d2a27236\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") "
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.893478 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle\") pod \"16110fd6-6d8f-4901-8f68-b155d2a27236\" (UID: \"16110fd6-6d8f-4901-8f68-b155d2a27236\") "
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.935448 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9" (OuterVolumeSpecName: "kube-api-access-mbwc9") pod "16110fd6-6d8f-4901-8f68-b155d2a27236" (UID: "16110fd6-6d8f-4901-8f68-b155d2a27236"). InnerVolumeSpecName "kube-api-access-mbwc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.935861 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "16110fd6-6d8f-4901-8f68-b155d2a27236" (UID: "16110fd6-6d8f-4901-8f68-b155d2a27236"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.938436 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory" (OuterVolumeSpecName: "inventory") pod "16110fd6-6d8f-4901-8f68-b155d2a27236" (UID: "16110fd6-6d8f-4901-8f68-b155d2a27236"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:22:33 crc kubenswrapper[4689]: I0123 11:22:33.991383 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "16110fd6-6d8f-4901-8f68-b155d2a27236" (UID: "16110fd6-6d8f-4901-8f68-b155d2a27236"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.002371 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbwc9\" (UniqueName: \"kubernetes.io/projected/16110fd6-6d8f-4901-8f68-b155d2a27236-kube-api-access-mbwc9\") on node \"crc\" DevicePath \"\""
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.002416 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.002430 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-inventory\") on node \"crc\" DevicePath \"\""
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.002442 4689 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16110fd6-6d8f-4901-8f68-b155d2a27236-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.332043 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.332097 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj" event={"ID":"16110fd6-6d8f-4901-8f68-b155d2a27236","Type":"ContainerDied","Data":"8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f"}
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.332230 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8181c98c84671d4148ade12660ed0ee91ec1c5fb7cfdf19ddba350082981492f"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.335489 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9" exitCode=0
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.335570 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9"}
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.335781 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f"}
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.335804 4689 scope.go:117] "RemoveContainer" containerID="7caea7e564998481676beb01a615f9fce56babbc0c7136df852b980a43a1621e"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.464425 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"]
Jan 23 11:22:34 crc kubenswrapper[4689]: E0123 11:22:34.464905 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="extract-utilities"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.464928 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="extract-utilities"
Jan 23 11:22:34 crc kubenswrapper[4689]: E0123 11:22:34.464947 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16110fd6-6d8f-4901-8f68-b155d2a27236" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.464957 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="16110fd6-6d8f-4901-8f68-b155d2a27236" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:22:34 crc kubenswrapper[4689]: E0123 11:22:34.464991 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="extract-content"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.464999 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="extract-content"
Jan 23 11:22:34 crc kubenswrapper[4689]: E0123 11:22:34.465026 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="registry-server"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.465033 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="registry-server"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.465284 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cbab070-713b-40d3-8f54-2867075c3c0d" containerName="registry-server"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.465313 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="16110fd6-6d8f-4901-8f68-b155d2a27236" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.466117 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.468812 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.469034 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.469318 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.469553 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.515080 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ddqs\" (UniqueName: \"kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.515134 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.515217 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.618124 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ddqs\" (UniqueName: \"kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.618209 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.618274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.964700 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.965306 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ddqs\" (UniqueName: \"kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:34 crc kubenswrapper[4689]: I0123 11:22:34.971083 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:35 crc kubenswrapper[4689]: I0123 11:22:35.023852 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"]
Jan 23 11:22:35 crc kubenswrapper[4689]: I0123 11:22:35.108068 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:22:35 crc kubenswrapper[4689]: W0123 11:22:35.753946 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod700a7d38_2c34_4c5e_a92e_62c448e4c6df.slice/crio-97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc WatchSource:0}: Error finding container 97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc: Status 404 returned error can't find the container with id 97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc
Jan 23 11:22:35 crc kubenswrapper[4689]: I0123 11:22:35.755439 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"]
Jan 23 11:22:36 crc kubenswrapper[4689]: I0123 11:22:36.361803 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" event={"ID":"700a7d38-2c34-4c5e-a92e-62c448e4c6df","Type":"ContainerStarted","Data":"97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc"}
Jan 23 11:22:38 crc kubenswrapper[4689]: I0123 11:22:38.396502 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" event={"ID":"700a7d38-2c34-4c5e-a92e-62c448e4c6df","Type":"ContainerStarted","Data":"5642fec1941df15deaff71e6fdc6196de20e3d12b12107b9d5ce89d69122efbc"}
Jan 23 11:22:38 crc kubenswrapper[4689]: I0123 11:22:38.414715 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" podStartSLOduration=2.5726697979999997 podStartE2EDuration="4.414693899s" podCreationTimestamp="2026-01-23 11:22:34 +0000 UTC" firstStartedPulling="2026-01-23 11:22:35.7566124 +0000 UTC m=+2020.381292259" lastFinishedPulling="2026-01-23 11:22:37.598636481 +0000 UTC m=+2022.223316360" observedRunningTime="2026-01-23 11:22:38.414544165 +0000 UTC m=+2023.039224034" watchObservedRunningTime="2026-01-23 11:22:38.414693899 +0000 UTC m=+2023.039373758"
Jan 23 11:22:39 crc kubenswrapper[4689]: I0123 11:22:39.039709 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8nxsm"]
Jan 23 11:22:39 crc kubenswrapper[4689]: I0123 11:22:39.054736 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8nxsm"]
Jan 23 11:22:39 crc kubenswrapper[4689]: I0123 11:22:39.654545 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8604732-adb6-4e50-b9a4-107ebb88d8a4" path="/var/lib/kubelet/pods/a8604732-adb6-4e50-b9a4-107ebb88d8a4/volumes"
Jan 23 11:22:50 crc kubenswrapper[4689]: I0123 11:22:50.057837 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-cn9tn"]
Jan 23 11:22:50 crc kubenswrapper[4689]: I0123 11:22:50.069849 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-cn9tn"]
Jan 23 11:22:51 crc kubenswrapper[4689]: I0123 11:22:51.659274 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7" path="/var/lib/kubelet/pods/3bee1c65-eb2e-4981-b3a3-1f83ef6a71c7/volumes"
Jan 23 11:22:52 crc kubenswrapper[4689]: I0123 11:22:52.034109 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-lr5lm"]
Jan 23 11:22:52 crc kubenswrapper[4689]: I0123 11:22:52.057514 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-lr5lm"]
Jan 23 11:22:53 crc kubenswrapper[4689]: I0123 11:22:53.654944 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b74dafce-64c6-4c46-886b-bdc4044b9b1e" path="/var/lib/kubelet/pods/b74dafce-64c6-4c46-886b-bdc4044b9b1e/volumes"
Jan 23 11:23:17 crc kubenswrapper[4689]: I0123 11:23:17.820584 4689 scope.go:117] "RemoveContainer" containerID="3f1a66de677ef8d25a2a9e01ba012731015e2c73227088db8d19f5312fd16bb5"
Jan 23 11:23:17 crc kubenswrapper[4689]: I0123 11:23:17.866105 4689 scope.go:117] "RemoveContainer" containerID="08429da34f62eda07b0b08f13139d9936a2dea2067c362ef1289bc04d2a9ae36"
Jan 23 11:23:17 crc kubenswrapper[4689]: I0123 11:23:17.920461 4689 scope.go:117] "RemoveContainer" containerID="390a1755d4c44f561a50ea749baa8e36f0681bd1a5e7f51c9c5511eddbf23124"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.038073 4689 scope.go:117] "RemoveContainer" containerID="144c9a3dc48192981a75f2216df78a5a958e3ae3631dac62066239a49a785d73"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.094373 4689 scope.go:117] "RemoveContainer" containerID="b5902f87c0cc24da5e2a217c5bbb5034f9f2209854c8e33f40f6890cc8946339"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.151552 4689 scope.go:117] "RemoveContainer" containerID="dbcfe4b3631f3b06f1321ff9886d5690fa756317c085de7396feba6e6e7ca0fe"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.176484 4689 scope.go:117] "RemoveContainer" containerID="80939d29045856874080702ae67aa434c0d30ce973cc878776ecfd85799da35b"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.206119 4689 scope.go:117] "RemoveContainer" containerID="ba98e616f38592c4add977eb79cbd3f3187315d39c359a07a765aa5294a9eb3e"
Jan 23 11:23:18 crc kubenswrapper[4689]: I0123 11:23:18.250394 4689 scope.go:117]
"RemoveContainer" containerID="d43e229a28663b1c0b272376a2cd80cc5b23a03ee80a4ee7023c4acf2fd9cc3d" Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.050191 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-r9b2p"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.061196 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-7hcxl"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.072780 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-cnrbr"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.082776 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-7hcxl"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.092295 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-r9b2p"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.101859 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-cnrbr"] Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.659803 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a91b673-3b20-4718-91f0-695225bc7f82" path="/var/lib/kubelet/pods/2a91b673-3b20-4718-91f0-695225bc7f82/volumes" Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.660453 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f3f3ef3-0b10-4d91-8147-3ea51947dc78" path="/var/lib/kubelet/pods/9f3f3ef3-0b10-4d91-8147-3ea51947dc78/volumes" Jan 23 11:23:45 crc kubenswrapper[4689]: I0123 11:23:45.661023 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4348edb-5864-4652-9e63-b2c452905118" path="/var/lib/kubelet/pods/d4348edb-5864-4652-9e63-b2c452905118/volumes" Jan 23 11:23:47 crc kubenswrapper[4689]: I0123 11:23:47.035855 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-f6ce-account-create-update-6j84v"] Jan 23 11:23:47 crc kubenswrapper[4689]: I0123 11:23:47.048084 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-f6ce-account-create-update-6j84v"] Jan 23 11:23:47 crc kubenswrapper[4689]: I0123 11:23:47.661249 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b25887e-9a4b-4100-b672-2d46c34cf1e0" path="/var/lib/kubelet/pods/1b25887e-9a4b-4100-b672-2d46c34cf1e0/volumes" Jan 23 11:23:48 crc kubenswrapper[4689]: I0123 11:23:48.036552 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f707-account-create-update-w6nbh"] Jan 23 11:23:48 crc kubenswrapper[4689]: I0123 11:23:48.049168 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-c4d1-account-create-update-9rq2z"] Jan 23 11:23:48 crc kubenswrapper[4689]: I0123 11:23:48.061180 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f707-account-create-update-w6nbh"] Jan 23 11:23:48 crc kubenswrapper[4689]: I0123 11:23:48.070685 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-c4d1-account-create-update-9rq2z"] Jan 23 11:23:49 crc kubenswrapper[4689]: I0123 11:23:49.655528 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2776dc4d-d084-461d-9d4a-d4cccfcb1dc2" path="/var/lib/kubelet/pods/2776dc4d-d084-461d-9d4a-d4cccfcb1dc2/volumes" Jan 23 11:23:49 crc kubenswrapper[4689]: I0123 11:23:49.657658 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="3cde63b4-6033-4530-884d-55d2274538c5" path="/var/lib/kubelet/pods/3cde63b4-6033-4530-884d-55d2274538c5/volumes" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.407652 4689 scope.go:117] "RemoveContainer" containerID="a2d518ab19a834f9b0667f069e1a0c90ff48526c26098f724789fccd3c1f9f82" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.457589 4689 scope.go:117] "RemoveContainer" containerID="cc9c8c96062815e530ed52a1f36964b78b7f414e15de42a2b52070d1c59cc6fa" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.509088 4689 scope.go:117] "RemoveContainer" containerID="6987a4d4001346ee6eb2af00615ecf5892d3675072e34ffd2645b894d0937017" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.589737 4689 scope.go:117] "RemoveContainer" containerID="1c42ba5d5820ff524a755e99c8a19d0b72c310815da77fe48ecaffb373f3cc9a" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.654135 4689 scope.go:117] "RemoveContainer" containerID="d1df398695aeb7ef490b25b64a8d073b39e1ae879be5ef454a2f3d5631247c83" Jan 23 11:24:18 crc kubenswrapper[4689]: I0123 11:24:18.721844 4689 scope.go:117] "RemoveContainer" containerID="dbb63de3c26be396b137942e59c85a669b09ac7215bfe4aedd0ab92ff5715222" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.152846 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"] Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.156514 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.185016 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"] Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.248277 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs6f8\" (UniqueName: \"kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.248724 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.249066 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.353409 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs6f8\" (UniqueName: \"kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.353560 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.353680 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.354524 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.354537 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.394267 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs6f8\" (UniqueName: \"kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8\") pod \"certified-operators-kvf8t\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:26 crc kubenswrapper[4689]: I0123 11:24:26.478739 4689 util.go:30] "No sandbox for pod can be found. 
Jan 23 11:24:27 crc kubenswrapper[4689]: I0123 11:24:27.156321 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"]
Jan 23 11:24:27 crc kubenswrapper[4689]: W0123 11:24:27.158411 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod331b757c_cffe_4de5_9cc3_c726fdbf9954.slice/crio-7aea772307d8ef86584c075ffe0511d994b3ac84ea56cb898330e8f1ddffff37 WatchSource:0}: Error finding container 7aea772307d8ef86584c075ffe0511d994b3ac84ea56cb898330e8f1ddffff37: Status 404 returned error can't find the container with id 7aea772307d8ef86584c075ffe0511d994b3ac84ea56cb898330e8f1ddffff37
Jan 23 11:24:27 crc kubenswrapper[4689]: I0123 11:24:27.648928 4689 generic.go:334] "Generic (PLEG): container finished" podID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerID="e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851" exitCode=0
Jan 23 11:24:27 crc kubenswrapper[4689]: I0123 11:24:27.652951 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerDied","Data":"e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851"}
Jan 23 11:24:27 crc kubenswrapper[4689]: I0123 11:24:27.652980 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerStarted","Data":"7aea772307d8ef86584c075ffe0511d994b3ac84ea56cb898330e8f1ddffff37"}
Jan 23 11:24:28 crc kubenswrapper[4689]: I0123 11:24:28.662251 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerStarted","Data":"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2"}
Jan 23 11:24:29 crc kubenswrapper[4689]: I0123 11:24:29.672348 4689 generic.go:334] "Generic (PLEG): container finished" podID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerID="037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2" exitCode=0
Jan 23 11:24:29 crc kubenswrapper[4689]: I0123 11:24:29.672399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerDied","Data":"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2"}
Jan 23 11:24:30 crc kubenswrapper[4689]: I0123 11:24:30.073652 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fq49m"]
Jan 23 11:24:30 crc kubenswrapper[4689]: I0123 11:24:30.104366 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fq49m"]
Jan 23 11:24:30 crc kubenswrapper[4689]: I0123 11:24:30.684456 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerStarted","Data":"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1"}
Jan 23 11:24:30 crc kubenswrapper[4689]: I0123 11:24:30.706306 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kvf8t" podStartSLOduration=2.283017816 podStartE2EDuration="4.706281931s" podCreationTimestamp="2026-01-23 11:24:26 +0000 UTC" firstStartedPulling="2026-01-23 11:24:27.650864234 +0000 UTC m=+2132.275544093" lastFinishedPulling="2026-01-23 11:24:30.074128339 +0000 UTC m=+2134.698808208" observedRunningTime="2026-01-23 11:24:30.702381043 +0000 UTC m=+2135.327060912" watchObservedRunningTime="2026-01-23 11:24:30.706281931 +0000 UTC m=+2135.330961790"
Jan 23 11:24:31 crc kubenswrapper[4689]: I0123 11:24:31.662612 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f705ffdf-98ca-48b4-bd00-1a4804326940" path="/var/lib/kubelet/pods/f705ffdf-98ca-48b4-bd00-1a4804326940/volumes"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.341332 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bk2zs"]
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.345045 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.376229 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bk2zs"]
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.434822 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.435736 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.435991 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp2jk\" (UniqueName: \"kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.538300 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.538515 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.538586 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp2jk\" (UniqueName: \"kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.538932 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.538977 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.560626 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp2jk\" (UniqueName: \"kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk\") pod \"community-operators-bk2zs\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") " pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:32 crc kubenswrapper[4689]: I0123 11:24:32.671951 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.258385 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bk2zs"]
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.311136 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.311213 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.725346 4689 generic.go:334] "Generic (PLEG): container finished" podID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerID="eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff" exitCode=0
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.725424 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerDied","Data":"eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff"}
Jan 23 11:24:33 crc kubenswrapper[4689]: I0123 11:24:33.725693 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerStarted","Data":"c63b4b203edd75a9219ed78da1fa63c891e586e5d78038cf6036fd63a622cdad"}
Jan 23 11:24:34 crc kubenswrapper[4689]: I0123 11:24:34.755457 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerStarted","Data":"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"}
event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerStarted","Data":"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"} Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.479551 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.480287 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.530140 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.781677 4689 generic.go:334] "Generic (PLEG): container finished" podID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerID="09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687" exitCode=0 Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.781754 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerDied","Data":"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"} Jan 23 11:24:36 crc kubenswrapper[4689]: I0123 11:24:36.842027 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:37 crc kubenswrapper[4689]: I0123 11:24:37.797442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerStarted","Data":"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5"} Jan 23 11:24:37 crc kubenswrapper[4689]: I0123 11:24:37.822018 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bk2zs" podStartSLOduration=2.344076561 podStartE2EDuration="5.821988507s" podCreationTimestamp="2026-01-23 11:24:32 +0000 UTC" firstStartedPulling="2026-01-23 11:24:33.729047946 +0000 UTC m=+2138.353727845" lastFinishedPulling="2026-01-23 11:24:37.206959932 +0000 UTC m=+2141.831639791" observedRunningTime="2026-01-23 11:24:37.813385912 +0000 UTC m=+2142.438065761" watchObservedRunningTime="2026-01-23 11:24:37.821988507 +0000 UTC m=+2142.446668396" Jan 23 11:24:38 crc kubenswrapper[4689]: I0123 11:24:38.528325 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"] Jan 23 11:24:38 crc kubenswrapper[4689]: I0123 11:24:38.806321 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kvf8t" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="registry-server" containerID="cri-o://69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1" gracePeriod=2 Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.384178 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.517513 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs6f8\" (UniqueName: \"kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8\") pod \"331b757c-cffe-4de5-9cc3-c726fdbf9954\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.517620 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content\") pod \"331b757c-cffe-4de5-9cc3-c726fdbf9954\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.517707 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities\") pod \"331b757c-cffe-4de5-9cc3-c726fdbf9954\" (UID: \"331b757c-cffe-4de5-9cc3-c726fdbf9954\") " Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.518593 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities" (OuterVolumeSpecName: "utilities") pod "331b757c-cffe-4de5-9cc3-c726fdbf9954" (UID: "331b757c-cffe-4de5-9cc3-c726fdbf9954"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.551346 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8" (OuterVolumeSpecName: "kube-api-access-bs6f8") pod "331b757c-cffe-4de5-9cc3-c726fdbf9954" (UID: "331b757c-cffe-4de5-9cc3-c726fdbf9954"). InnerVolumeSpecName "kube-api-access-bs6f8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.626378 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs6f8\" (UniqueName: \"kubernetes.io/projected/331b757c-cffe-4de5-9cc3-c726fdbf9954-kube-api-access-bs6f8\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.626412 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.634058 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "331b757c-cffe-4de5-9cc3-c726fdbf9954" (UID: "331b757c-cffe-4de5-9cc3-c726fdbf9954"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.728038 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331b757c-cffe-4de5-9cc3-c726fdbf9954-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.818755 4689 generic.go:334] "Generic (PLEG): container finished" podID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerID="69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1" exitCode=0 Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.818800 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerDied","Data":"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1"} Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.818815 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kvf8t" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.818824 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvf8t" event={"ID":"331b757c-cffe-4de5-9cc3-c726fdbf9954","Type":"ContainerDied","Data":"7aea772307d8ef86584c075ffe0511d994b3ac84ea56cb898330e8f1ddffff37"} Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.818845 4689 scope.go:117] "RemoveContainer" containerID="69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.845786 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"] Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.851580 4689 scope.go:117] "RemoveContainer" containerID="037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.856754 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kvf8t"] Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.892853 4689 scope.go:117] "RemoveContainer" containerID="e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.929299 4689 scope.go:117] "RemoveContainer" containerID="69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1" Jan 23 11:24:39 crc kubenswrapper[4689]: E0123 11:24:39.930567 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1\": container with ID starting with 69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1 not found: ID does not exist" containerID="69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1" Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.930663 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1"} err="failed to get container status \"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1\": rpc error: code = NotFound desc = could not find container \"69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1\": container with ID starting with 69678c3c74c8e7b5c4ed566b767da8262c28a29057118342812830578dd714e1 not found: ID does not exist" Jan 23 
Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.930689 4689 scope.go:117] "RemoveContainer" containerID="037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2"
Jan 23 11:24:39 crc kubenswrapper[4689]: E0123 11:24:39.930906 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2\": container with ID starting with 037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2 not found: ID does not exist" containerID="037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2"
Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.930932 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2"} err="failed to get container status \"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2\": rpc error: code = NotFound desc = could not find container \"037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2\": container with ID starting with 037d02c262cc35a4712ea5b9bf08ce03f893376c8315b1094e3b03971473def2 not found: ID does not exist"
Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.930945 4689 scope.go:117] "RemoveContainer" containerID="e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851"
Jan 23 11:24:39 crc kubenswrapper[4689]: E0123 11:24:39.931164 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851\": container with ID starting with e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851 not found: ID does not exist" containerID="e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851"
Jan 23 11:24:39 crc kubenswrapper[4689]: I0123 11:24:39.931192 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851"} err="failed to get container status \"e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851\": rpc error: code = NotFound desc = could not find container \"e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851\": container with ID starting with e4bd42e24396eefcbac65b04abbb33ed03826b6c0b3f9bd31ce014181e23a851 not found: ID does not exist"
Jan 23 11:24:41 crc kubenswrapper[4689]: I0123 11:24:41.656170 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" path="/var/lib/kubelet/pods/331b757c-cffe-4de5-9cc3-c726fdbf9954/volumes"
Jan 23 11:24:42 crc kubenswrapper[4689]: I0123 11:24:42.672851 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:42 crc kubenswrapper[4689]: I0123 11:24:42.673373 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:42 crc kubenswrapper[4689]: I0123 11:24:42.742007 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:42 crc kubenswrapper[4689]: I0123 11:24:42.902878 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:43 crc kubenswrapper[4689]: I0123 11:24:43.865124 4689 generic.go:334] "Generic (PLEG): container finished" podID="700a7d38-2c34-4c5e-a92e-62c448e4c6df" containerID="5642fec1941df15deaff71e6fdc6196de20e3d12b12107b9d5ce89d69122efbc" exitCode=0
Jan 23 11:24:43 crc kubenswrapper[4689]: I0123 11:24:43.865209 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" event={"ID":"700a7d38-2c34-4c5e-a92e-62c448e4c6df","Type":"ContainerDied","Data":"5642fec1941df15deaff71e6fdc6196de20e3d12b12107b9d5ce89d69122efbc"}
Jan 23 11:24:43 crc kubenswrapper[4689]: I0123 11:24:43.924910 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bk2zs"]
Jan 23 11:24:44 crc kubenswrapper[4689]: I0123 11:24:44.875840 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bk2zs" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="registry-server" containerID="cri-o://f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5" gracePeriod=2
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.483299 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm"
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.497529 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bk2zs"
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.609305 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory\") pod \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.609348 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam\") pod \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.609365 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp2jk\" (UniqueName: \"kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk\") pod \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.609408 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ddqs\" (UniqueName: \"kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs\") pod \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\" (UID: \"700a7d38-2c34-4c5e-a92e-62c448e4c6df\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.610097 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities\") pod \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.610360 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content\") pod \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\" (UID: \"453a344a-7566-4eff-bdf4-65cfbbe15f8c\") "
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.610837 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities" (OuterVolumeSpecName: "utilities") pod "453a344a-7566-4eff-bdf4-65cfbbe15f8c" (UID: "453a344a-7566-4eff-bdf4-65cfbbe15f8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.615354 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs" (OuterVolumeSpecName: "kube-api-access-7ddqs") pod "700a7d38-2c34-4c5e-a92e-62c448e4c6df" (UID: "700a7d38-2c34-4c5e-a92e-62c448e4c6df"). InnerVolumeSpecName "kube-api-access-7ddqs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.616919 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk" (OuterVolumeSpecName: "kube-api-access-wp2jk") pod "453a344a-7566-4eff-bdf4-65cfbbe15f8c" (UID: "453a344a-7566-4eff-bdf4-65cfbbe15f8c"). InnerVolumeSpecName "kube-api-access-wp2jk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.642644 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "700a7d38-2c34-4c5e-a92e-62c448e4c6df" (UID: "700a7d38-2c34-4c5e-a92e-62c448e4c6df"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.653415 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory" (OuterVolumeSpecName: "inventory") pod "700a7d38-2c34-4c5e-a92e-62c448e4c6df" (UID: "700a7d38-2c34-4c5e-a92e-62c448e4c6df"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.669013 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "453a344a-7566-4eff-bdf4-65cfbbe15f8c" (UID: "453a344a-7566-4eff-bdf4-65cfbbe15f8c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712042 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712082 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712092 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/700a7d38-2c34-4c5e-a92e-62c448e4c6df-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712104 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp2jk\" (UniqueName: \"kubernetes.io/projected/453a344a-7566-4eff-bdf4-65cfbbe15f8c-kube-api-access-wp2jk\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712113 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ddqs\" (UniqueName: \"kubernetes.io/projected/700a7d38-2c34-4c5e-a92e-62c448e4c6df-kube-api-access-7ddqs\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.712121 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/453a344a-7566-4eff-bdf4-65cfbbe15f8c-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.885788 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.885802 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm" event={"ID":"700a7d38-2c34-4c5e-a92e-62c448e4c6df","Type":"ContainerDied","Data":"97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc"} Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.885840 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97696c5786f933c2b101d370bb38439242925ee0fd4ac5663d1e7129b6e62fbc" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.888820 4689 generic.go:334] "Generic (PLEG): container finished" podID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerID="f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5" exitCode=0 Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.888920 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bk2zs" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.888951 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerDied","Data":"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5"} Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.888989 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk2zs" event={"ID":"453a344a-7566-4eff-bdf4-65cfbbe15f8c","Type":"ContainerDied","Data":"c63b4b203edd75a9219ed78da1fa63c891e586e5d78038cf6036fd63a622cdad"} Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.889034 4689 scope.go:117] "RemoveContainer" containerID="f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.923338 4689 scope.go:117] "RemoveContainer" containerID="09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.937334 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bk2zs"] Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.946950 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bk2zs"] Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.967877 4689 scope.go:117] "RemoveContainer" containerID="eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.983834 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"] Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984340 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984360 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984396 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700a7d38-2c34-4c5e-a92e-62c448e4c6df" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984407 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="700a7d38-2c34-4c5e-a92e-62c448e4c6df" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984423 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984432 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984460 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="extract-content" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984468 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="extract-content" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984484 4689 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="extract-content" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984492 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="extract-content" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984510 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="extract-utilities" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984518 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="extract-utilities" Jan 23 11:24:45 crc kubenswrapper[4689]: E0123 11:24:45.984537 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="extract-utilities" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984546 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="extract-utilities" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984938 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="700a7d38-2c34-4c5e-a92e-62c448e4c6df" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984968 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.984992 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="331b757c-cffe-4de5-9cc3-c726fdbf9954" containerName="registry-server" Jan 23 11:24:45 crc kubenswrapper[4689]: I0123 11:24:45.986847 4689 util.go:30] "No sandbox for pod can be found. 
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.000649 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.000917 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.001017 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.001106 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.019309 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjh7r\" (UniqueName: \"kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.019445 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.019473 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.021086 4689 scope.go:117] "RemoveContainer" containerID="f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.021289 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"]
Jan 23 11:24:46 crc kubenswrapper[4689]: E0123 11:24:46.023763 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5\": container with ID starting with f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5 not found: ID does not exist" containerID="f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.023806 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5"} err="failed to get container status \"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5\": rpc error: code = NotFound desc = could not find container \"f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5\": container with ID starting with f7bc6499f4fe943e777bee9e2ce97e93a8dcf6809d3a3623c272ba38566998d5 not found: ID does not exist"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.023900 4689 scope.go:117] "RemoveContainer" containerID="09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"
Jan 23 11:24:46 crc kubenswrapper[4689]: E0123 11:24:46.024463 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687\": container with ID starting with 09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687 not found: ID does not exist" containerID="09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.024485 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687"} err="failed to get container status \"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687\": rpc error: code = NotFound desc = could not find container \"09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687\": container with ID starting with 09133f507366746ab535a4f77c0f60915e7689d487316b170883f51fb36ef687 not found: ID does not exist"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.024497 4689 scope.go:117] "RemoveContainer" containerID="eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff"
Jan 23 11:24:46 crc kubenswrapper[4689]: E0123 11:24:46.026444 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff\": container with ID starting with eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff not found: ID does not exist" containerID="eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.026484 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff"} err="failed to get container status \"eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff\": rpc error: code = NotFound desc = could not find container \"eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff\": container with ID starting with eb0a1cf13d86b764269a9cb709de5eaac1925186d1b6b33837f9689f216acaff not found: ID does not exist"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.121099 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjh7r\" (UniqueName: \"kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.121216 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.121237 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.126288 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.128496 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.138792 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjh7r\" (UniqueName: \"kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.372343 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"
Jan 23 11:24:46 crc kubenswrapper[4689]: W0123 11:24:46.907517 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2250236_884b_4557_b447_5f6fe512fbdf.slice/crio-7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817 WatchSource:0}: Error finding container 7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817: Status 404 returned error can't find the container with id 7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817
Jan 23 11:24:46 crc kubenswrapper[4689]: I0123 11:24:46.907561 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp"]
Jan 23 11:24:47 crc kubenswrapper[4689]: I0123 11:24:47.653494 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="453a344a-7566-4eff-bdf4-65cfbbe15f8c" path="/var/lib/kubelet/pods/453a344a-7566-4eff-bdf4-65cfbbe15f8c/volumes"
Jan 23 11:24:47 crc kubenswrapper[4689]: I0123 11:24:47.934988 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" event={"ID":"d2250236-884b-4557-b447-5f6fe512fbdf","Type":"ContainerStarted","Data":"448c4882dd8038bf41aeca5d729eaa83aeee1b1d636223be79bcf4b3ff0b2a41"}
Jan 23 11:24:47 crc kubenswrapper[4689]: I0123 11:24:47.935509 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" event={"ID":"d2250236-884b-4557-b447-5f6fe512fbdf","Type":"ContainerStarted","Data":"7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817"}
Jan 23 11:24:47 crc kubenswrapper[4689]: I0123 11:24:47.962070 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" podStartSLOduration=2.443259228 podStartE2EDuration="2.962044983s" podCreationTimestamp="2026-01-23 11:24:45 +0000 UTC" firstStartedPulling="2026-01-23 11:24:46.909774675 +0000 UTC m=+2151.534454534" lastFinishedPulling="2026-01-23 11:24:47.42856039 +0000 UTC m=+2152.053240289" observedRunningTime="2026-01-23 11:24:47.953332986 +0000 UTC m=+2152.578012845" watchObservedRunningTime="2026-01-23 11:24:47.962044983 +0000 UTC m=+2152.586724842"
Jan 23 11:24:57 crc kubenswrapper[4689]: I0123 11:24:57.036667 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-jl9nk"]
Jan 23 11:24:57 crc kubenswrapper[4689]: I0123 11:24:57.049447 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-jl9nk"]
Jan 23 11:24:57 crc kubenswrapper[4689]: I0123 11:24:57.657447 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2f42c42-b409-4a81-ae96-5b5a3b62263f" path="/var/lib/kubelet/pods/f2f42c42-b409-4a81-ae96-5b5a3b62263f/volumes"
Jan 23 11:24:58 crc kubenswrapper[4689]: I0123 11:24:58.040827 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6lvmx"]
Jan 23 11:24:58 crc kubenswrapper[4689]: I0123 11:24:58.054682 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6lvmx"]
Jan 23 11:24:59 crc kubenswrapper[4689]: I0123 11:24:59.654876 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1a086a5-30c9-425a-9cc8-bfc7ff439d23"
path="/var/lib/kubelet/pods/b1a086a5-30c9-425a-9cc8-bfc7ff439d23/volumes" Jan 23 11:25:00 crc kubenswrapper[4689]: I0123 11:25:00.037412 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-x9w8s"] Jan 23 11:25:00 crc kubenswrapper[4689]: I0123 11:25:00.046913 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-1b36-account-create-update-k4c7s"] Jan 23 11:25:00 crc kubenswrapper[4689]: I0123 11:25:00.057786 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-x9w8s"] Jan 23 11:25:00 crc kubenswrapper[4689]: I0123 11:25:00.066936 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-1b36-account-create-update-k4c7s"] Jan 23 11:25:01 crc kubenswrapper[4689]: I0123 11:25:01.667436 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="007e6c9b-000b-4516-9caf-a5c8204515eb" path="/var/lib/kubelet/pods/007e6c9b-000b-4516-9caf-a5c8204515eb/volumes" Jan 23 11:25:01 crc kubenswrapper[4689]: I0123 11:25:01.669787 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9787cfb5-0f46-4f88-a9e2-608370561edb" path="/var/lib/kubelet/pods/9787cfb5-0f46-4f88-a9e2-608370561edb/volumes" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.078484 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.081846 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.093066 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.186985 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjnxc\" (UniqueName: \"kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.187180 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.187525 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.290678 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.290930 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjnxc\" 
(UniqueName: \"kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.291007 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.291275 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.291508 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.312638 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.312720 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.329258 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjnxc\" (UniqueName: \"kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc\") pod \"redhat-operators-5f6kq\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:03 crc kubenswrapper[4689]: I0123 11:25:03.427626 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:04 crc kubenswrapper[4689]: I0123 11:25:04.009739 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:04 crc kubenswrapper[4689]: I0123 11:25:04.172772 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerStarted","Data":"0533b4e81517469cf6f8e11a7c77169eb50fc6e3bbad478a0233522f5a047bcb"} Jan 23 11:25:05 crc kubenswrapper[4689]: I0123 11:25:05.187231 4689 generic.go:334] "Generic (PLEG): container finished" podID="fdaae371-5a2d-4766-a947-752333930f8f" containerID="606c4a6d4a750e86f7222ae55b12b08864d206b668f0a9099d92b8bb22bdae41" exitCode=0 Jan 23 11:25:05 crc kubenswrapper[4689]: I0123 11:25:05.187322 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerDied","Data":"606c4a6d4a750e86f7222ae55b12b08864d206b668f0a9099d92b8bb22bdae41"} Jan 23 11:25:06 crc kubenswrapper[4689]: I0123 11:25:06.203235 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerStarted","Data":"f5ac6691fd688025c3c73d0c109c4572e6d4faa6aa81e60a5aef470d69fc214b"} Jan 23 11:25:11 crc kubenswrapper[4689]: I0123 11:25:11.278329 4689 generic.go:334] "Generic (PLEG): container finished" podID="fdaae371-5a2d-4766-a947-752333930f8f" containerID="f5ac6691fd688025c3c73d0c109c4572e6d4faa6aa81e60a5aef470d69fc214b" exitCode=0 Jan 23 11:25:11 crc kubenswrapper[4689]: I0123 11:25:11.278430 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerDied","Data":"f5ac6691fd688025c3c73d0c109c4572e6d4faa6aa81e60a5aef470d69fc214b"} Jan 23 11:25:12 crc kubenswrapper[4689]: I0123 11:25:12.291504 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerStarted","Data":"47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a"} Jan 23 11:25:13 crc kubenswrapper[4689]: I0123 11:25:13.427792 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:13 crc kubenswrapper[4689]: I0123 11:25:13.427848 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:14 crc kubenswrapper[4689]: I0123 11:25:14.474482 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5f6kq" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="registry-server" probeResult="failure" output=< Jan 23 11:25:14 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:25:14 crc kubenswrapper[4689]: > Jan 23 11:25:18 crc kubenswrapper[4689]: I0123 11:25:18.879699 4689 scope.go:117] "RemoveContainer" containerID="d4ac9565f628f614399f6dccc953e3b17707907a5f4101691334b7f8d5f5f5ca" Jan 23 11:25:18 crc kubenswrapper[4689]: I0123 11:25:18.906645 4689 scope.go:117] "RemoveContainer" containerID="860b2e60837910ebdb4dcf0d77734726303ed567543edfcf085856979f25d8e4" Jan 23 11:25:18 crc kubenswrapper[4689]: I0123 
11:25:18.968839 4689 scope.go:117] "RemoveContainer" containerID="ab746f5bfc727faff8c2d14cf846a46da9bdbf546b425054933805eddc48101a" Jan 23 11:25:19 crc kubenswrapper[4689]: I0123 11:25:19.028269 4689 scope.go:117] "RemoveContainer" containerID="73ca42c6e19fb5542e75c8c2d4ca6a2e2cb7ec372f27fe3a7f40d1dd4f49213b" Jan 23 11:25:19 crc kubenswrapper[4689]: I0123 11:25:19.101543 4689 scope.go:117] "RemoveContainer" containerID="12b802b621f6682c2aa84ae6f8e224746e9d3e7a66592d4238bb02f54e31058c" Jan 23 11:25:23 crc kubenswrapper[4689]: I0123 11:25:23.506747 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:23 crc kubenswrapper[4689]: I0123 11:25:23.546489 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5f6kq" podStartSLOduration=13.861403632 podStartE2EDuration="20.546462771s" podCreationTimestamp="2026-01-23 11:25:03 +0000 UTC" firstStartedPulling="2026-01-23 11:25:05.190032319 +0000 UTC m=+2169.814712198" lastFinishedPulling="2026-01-23 11:25:11.875091468 +0000 UTC m=+2176.499771337" observedRunningTime="2026-01-23 11:25:12.312418456 +0000 UTC m=+2176.937098315" watchObservedRunningTime="2026-01-23 11:25:23.546462771 +0000 UTC m=+2188.171142640" Jan 23 11:25:23 crc kubenswrapper[4689]: I0123 11:25:23.584272 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:23 crc kubenswrapper[4689]: I0123 11:25:23.770632 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:25 crc kubenswrapper[4689]: I0123 11:25:25.458265 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5f6kq" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="registry-server" containerID="cri-o://47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a" gracePeriod=2 Jan 23 11:25:25 crc kubenswrapper[4689]: E0123 11:25:25.712956 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfdaae371_5a2d_4766_a947_752333930f8f.slice/crio-47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfdaae371_5a2d_4766_a947_752333930f8f.slice/crio-conmon-47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a.scope\": RecentStats: unable to find data in memory cache]" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.471550 4689 generic.go:334] "Generic (PLEG): container finished" podID="fdaae371-5a2d-4766-a947-752333930f8f" containerID="47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a" exitCode=0 Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.471648 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerDied","Data":"47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a"} Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.472345 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5f6kq" 
event={"ID":"fdaae371-5a2d-4766-a947-752333930f8f","Type":"ContainerDied","Data":"0533b4e81517469cf6f8e11a7c77169eb50fc6e3bbad478a0233522f5a047bcb"} Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.472704 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0533b4e81517469cf6f8e11a7c77169eb50fc6e3bbad478a0233522f5a047bcb" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.558775 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.708387 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjnxc\" (UniqueName: \"kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc\") pod \"fdaae371-5a2d-4766-a947-752333930f8f\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.708791 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content\") pod \"fdaae371-5a2d-4766-a947-752333930f8f\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.708949 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities\") pod \"fdaae371-5a2d-4766-a947-752333930f8f\" (UID: \"fdaae371-5a2d-4766-a947-752333930f8f\") " Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.710300 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities" (OuterVolumeSpecName: "utilities") pod "fdaae371-5a2d-4766-a947-752333930f8f" (UID: "fdaae371-5a2d-4766-a947-752333930f8f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.714842 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc" (OuterVolumeSpecName: "kube-api-access-kjnxc") pod "fdaae371-5a2d-4766-a947-752333930f8f" (UID: "fdaae371-5a2d-4766-a947-752333930f8f"). InnerVolumeSpecName "kube-api-access-kjnxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.811963 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.812099 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjnxc\" (UniqueName: \"kubernetes.io/projected/fdaae371-5a2d-4766-a947-752333930f8f-kube-api-access-kjnxc\") on node \"crc\" DevicePath \"\"" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.823927 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fdaae371-5a2d-4766-a947-752333930f8f" (UID: "fdaae371-5a2d-4766-a947-752333930f8f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:25:26 crc kubenswrapper[4689]: I0123 11:25:26.915323 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaae371-5a2d-4766-a947-752333930f8f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:25:27 crc kubenswrapper[4689]: I0123 11:25:27.482591 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5f6kq" Jan 23 11:25:27 crc kubenswrapper[4689]: I0123 11:25:27.527763 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:27 crc kubenswrapper[4689]: I0123 11:25:27.539786 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5f6kq"] Jan 23 11:25:27 crc kubenswrapper[4689]: I0123 11:25:27.655305 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdaae371-5a2d-4766-a947-752333930f8f" path="/var/lib/kubelet/pods/fdaae371-5a2d-4766-a947-752333930f8f/volumes" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.310594 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.311137 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.311232 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.312406 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.312514 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" gracePeriod=600 Jan 23 11:25:33 crc kubenswrapper[4689]: E0123 11:25:33.432663 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.545958 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" 
containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" exitCode=0 Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.546006 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f"} Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.546037 4689 scope.go:117] "RemoveContainer" containerID="105d16365ad97c922e3f7df6b806bc3e7525acd81cd64b9ddd66539fe5554ac9" Jan 23 11:25:33 crc kubenswrapper[4689]: I0123 11:25:33.546812 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:25:33 crc kubenswrapper[4689]: E0123 11:25:33.547172 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:25:43 crc kubenswrapper[4689]: I0123 11:25:43.048431 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-mhxmh"] Jan 23 11:25:43 crc kubenswrapper[4689]: I0123 11:25:43.062677 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-mhxmh"] Jan 23 11:25:43 crc kubenswrapper[4689]: I0123 11:25:43.661028 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6502814-424c-4bb5-bd7f-bd986f84c813" path="/var/lib/kubelet/pods/d6502814-424c-4bb5-bd7f-bd986f84c813/volumes" Jan 23 11:25:47 crc kubenswrapper[4689]: I0123 11:25:47.640501 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:25:47 crc kubenswrapper[4689]: E0123 11:25:47.641380 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:25:58 crc kubenswrapper[4689]: I0123 11:25:58.293130 4689 generic.go:334] "Generic (PLEG): container finished" podID="d2250236-884b-4557-b447-5f6fe512fbdf" containerID="448c4882dd8038bf41aeca5d729eaa83aeee1b1d636223be79bcf4b3ff0b2a41" exitCode=0 Jan 23 11:25:58 crc kubenswrapper[4689]: I0123 11:25:58.293208 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" event={"ID":"d2250236-884b-4557-b447-5f6fe512fbdf","Type":"ContainerDied","Data":"448c4882dd8038bf41aeca5d729eaa83aeee1b1d636223be79bcf4b3ff0b2a41"} Jan 23 11:25:58 crc kubenswrapper[4689]: I0123 11:25:58.640872 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:25:58 crc kubenswrapper[4689]: E0123 11:25:58.641237 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:25:59 crc kubenswrapper[4689]: I0123 11:25:59.859480 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.031702 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory\") pod \"d2250236-884b-4557-b447-5f6fe512fbdf\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.031940 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam\") pod \"d2250236-884b-4557-b447-5f6fe512fbdf\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.032450 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjh7r\" (UniqueName: \"kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r\") pod \"d2250236-884b-4557-b447-5f6fe512fbdf\" (UID: \"d2250236-884b-4557-b447-5f6fe512fbdf\") " Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.037682 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r" (OuterVolumeSpecName: "kube-api-access-zjh7r") pod "d2250236-884b-4557-b447-5f6fe512fbdf" (UID: "d2250236-884b-4557-b447-5f6fe512fbdf"). InnerVolumeSpecName "kube-api-access-zjh7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.062677 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory" (OuterVolumeSpecName: "inventory") pod "d2250236-884b-4557-b447-5f6fe512fbdf" (UID: "d2250236-884b-4557-b447-5f6fe512fbdf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.068696 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d2250236-884b-4557-b447-5f6fe512fbdf" (UID: "d2250236-884b-4557-b447-5f6fe512fbdf"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.135542 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.135576 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2250236-884b-4557-b447-5f6fe512fbdf-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.135587 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjh7r\" (UniqueName: \"kubernetes.io/projected/d2250236-884b-4557-b447-5f6fe512fbdf-kube-api-access-zjh7r\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.332582 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" event={"ID":"d2250236-884b-4557-b447-5f6fe512fbdf","Type":"ContainerDied","Data":"7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817"} Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.333062 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7694a4a227c0a7690301fbc3fa1cfbe7fa88a1b8e48dbbbb5fdfa32055584817" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.333032 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.404944 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj"] Jan 23 11:26:00 crc kubenswrapper[4689]: E0123 11:26:00.405630 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="registry-server" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.405655 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="registry-server" Jan 23 11:26:00 crc kubenswrapper[4689]: E0123 11:26:00.405704 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="extract-content" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.405713 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="extract-content" Jan 23 11:26:00 crc kubenswrapper[4689]: E0123 11:26:00.405725 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="extract-utilities" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.405734 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="extract-utilities" Jan 23 11:26:00 crc kubenswrapper[4689]: E0123 11:26:00.405746 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2250236-884b-4557-b447-5f6fe512fbdf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.405755 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2250236-884b-4557-b447-5f6fe512fbdf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:00 crc 
kubenswrapper[4689]: I0123 11:26:00.406029 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2250236-884b-4557-b447-5f6fe512fbdf" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.406065 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdaae371-5a2d-4766-a947-752333930f8f" containerName="registry-server" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.407124 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.410338 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.410397 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.410615 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.412230 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.421475 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj"] Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.547638 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bgxf\" (UniqueName: \"kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.547935 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.547976 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.649690 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bgxf\" (UniqueName: \"kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.649777 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.649820 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.653615 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.668417 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.673706 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bgxf\" (UniqueName: \"kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:00 crc kubenswrapper[4689]: I0123 11:26:00.738280 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:01 crc kubenswrapper[4689]: I0123 11:26:01.263172 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj"] Jan 23 11:26:01 crc kubenswrapper[4689]: I0123 11:26:01.346016 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" event={"ID":"175a3ee2-d571-4f2f-8688-536d09975ffd","Type":"ContainerStarted","Data":"5723d4390e2e2a22ca28549d4bee1b72f3fbb0cdf31fdc627c6af78d2d21a873"} Jan 23 11:26:02 crc kubenswrapper[4689]: I0123 11:26:02.357667 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" event={"ID":"175a3ee2-d571-4f2f-8688-536d09975ffd","Type":"ContainerStarted","Data":"7c6ede95f0fe7ac7e2447b6312a9af3681fe840efd3de23cee117286dfd5c7a2"} Jan 23 11:26:02 crc kubenswrapper[4689]: I0123 11:26:02.383437 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" podStartSLOduration=1.8284663829999999 podStartE2EDuration="2.38341229s" podCreationTimestamp="2026-01-23 11:26:00 +0000 UTC" firstStartedPulling="2026-01-23 11:26:01.26447012 +0000 UTC m=+2225.889149969" lastFinishedPulling="2026-01-23 11:26:01.819416017 +0000 UTC m=+2226.444095876" observedRunningTime="2026-01-23 11:26:02.374439607 +0000 UTC m=+2226.999119456" watchObservedRunningTime="2026-01-23 11:26:02.38341229 +0000 UTC m=+2227.008092149" Jan 23 11:26:07 crc kubenswrapper[4689]: I0123 11:26:07.414454 4689 generic.go:334] "Generic (PLEG): container finished" podID="175a3ee2-d571-4f2f-8688-536d09975ffd" containerID="7c6ede95f0fe7ac7e2447b6312a9af3681fe840efd3de23cee117286dfd5c7a2" exitCode=0 Jan 23 11:26:07 crc kubenswrapper[4689]: I0123 11:26:07.414542 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" event={"ID":"175a3ee2-d571-4f2f-8688-536d09975ffd","Type":"ContainerDied","Data":"7c6ede95f0fe7ac7e2447b6312a9af3681fe840efd3de23cee117286dfd5c7a2"} Jan 23 11:26:08 crc kubenswrapper[4689]: I0123 11:26:08.973586 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.087584 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bgxf\" (UniqueName: \"kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf\") pod \"175a3ee2-d571-4f2f-8688-536d09975ffd\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.087905 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam\") pod \"175a3ee2-d571-4f2f-8688-536d09975ffd\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.088661 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory\") pod \"175a3ee2-d571-4f2f-8688-536d09975ffd\" (UID: \"175a3ee2-d571-4f2f-8688-536d09975ffd\") " Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.095203 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf" (OuterVolumeSpecName: "kube-api-access-2bgxf") pod "175a3ee2-d571-4f2f-8688-536d09975ffd" (UID: "175a3ee2-d571-4f2f-8688-536d09975ffd"). InnerVolumeSpecName "kube-api-access-2bgxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.130428 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "175a3ee2-d571-4f2f-8688-536d09975ffd" (UID: "175a3ee2-d571-4f2f-8688-536d09975ffd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.131364 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory" (OuterVolumeSpecName: "inventory") pod "175a3ee2-d571-4f2f-8688-536d09975ffd" (UID: "175a3ee2-d571-4f2f-8688-536d09975ffd"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.191932 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.192121 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/175a3ee2-d571-4f2f-8688-536d09975ffd-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.194202 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bgxf\" (UniqueName: \"kubernetes.io/projected/175a3ee2-d571-4f2f-8688-536d09975ffd-kube-api-access-2bgxf\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.444345 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" event={"ID":"175a3ee2-d571-4f2f-8688-536d09975ffd","Type":"ContainerDied","Data":"5723d4390e2e2a22ca28549d4bee1b72f3fbb0cdf31fdc627c6af78d2d21a873"} Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.444911 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5723d4390e2e2a22ca28549d4bee1b72f3fbb0cdf31fdc627c6af78d2d21a873" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.444437 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.535629 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn"] Jan 23 11:26:09 crc kubenswrapper[4689]: E0123 11:26:09.536329 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175a3ee2-d571-4f2f-8688-536d09975ffd" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.536368 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="175a3ee2-d571-4f2f-8688-536d09975ffd" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.536752 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="175a3ee2-d571-4f2f-8688-536d09975ffd" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.537854 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.540687 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.540931 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.541072 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.541515 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.547132 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn"] Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.706696 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.706972 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.707444 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czzdh\" (UniqueName: \"kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.810974 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.811189 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.811455 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czzdh\" (UniqueName: \"kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.816944 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.824205 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.834790 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czzdh\" (UniqueName: \"kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-vx4zn\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:09 crc kubenswrapper[4689]: I0123 11:26:09.866437 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:10 crc kubenswrapper[4689]: I0123 11:26:10.423409 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn"] Jan 23 11:26:10 crc kubenswrapper[4689]: W0123 11:26:10.430382 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda81f5043_068a_4164_a466_f867c148c637.slice/crio-3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717 WatchSource:0}: Error finding container 3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717: Status 404 returned error can't find the container with id 3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717 Jan 23 11:26:10 crc kubenswrapper[4689]: I0123 11:26:10.454127 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" event={"ID":"a81f5043-068a-4164-a466-f867c148c637","Type":"ContainerStarted","Data":"3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717"} Jan 23 11:26:11 crc kubenswrapper[4689]: I0123 11:26:11.469187 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" event={"ID":"a81f5043-068a-4164-a466-f867c148c637","Type":"ContainerStarted","Data":"6c228c3b59fedd834c0f462c70bf54bb30dcce639a8e8b9a784840d119b29f7a"} Jan 23 11:26:11 crc kubenswrapper[4689]: I0123 11:26:11.489055 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" podStartSLOduration=2.074378161 podStartE2EDuration="2.489035473s" podCreationTimestamp="2026-01-23 11:26:09 +0000 UTC" firstStartedPulling="2026-01-23 11:26:10.432790384 +0000 UTC m=+2235.057470253" lastFinishedPulling="2026-01-23 
11:26:10.847447696 +0000 UTC m=+2235.472127565" observedRunningTime="2026-01-23 11:26:11.487250248 +0000 UTC m=+2236.111930107" watchObservedRunningTime="2026-01-23 11:26:11.489035473 +0000 UTC m=+2236.113715332" Jan 23 11:26:13 crc kubenswrapper[4689]: I0123 11:26:13.639926 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:26:13 crc kubenswrapper[4689]: E0123 11:26:13.640591 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:26:19 crc kubenswrapper[4689]: I0123 11:26:19.270059 4689 scope.go:117] "RemoveContainer" containerID="945a1ac05c331d59e690ce2f632d6dd87b6c746d3968c445da4f420da85210fd" Jan 23 11:26:24 crc kubenswrapper[4689]: I0123 11:26:24.640635 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:26:24 crc kubenswrapper[4689]: E0123 11:26:24.641691 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:26:35 crc kubenswrapper[4689]: I0123 11:26:35.652040 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:26:35 crc kubenswrapper[4689]: E0123 11:26:35.653034 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:26:47 crc kubenswrapper[4689]: I0123 11:26:47.099299 4689 generic.go:334] "Generic (PLEG): container finished" podID="a81f5043-068a-4164-a466-f867c148c637" containerID="6c228c3b59fedd834c0f462c70bf54bb30dcce639a8e8b9a784840d119b29f7a" exitCode=0 Jan 23 11:26:47 crc kubenswrapper[4689]: I0123 11:26:47.099371 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" event={"ID":"a81f5043-068a-4164-a466-f867c148c637","Type":"ContainerDied","Data":"6c228c3b59fedd834c0f462c70bf54bb30dcce639a8e8b9a784840d119b29f7a"} Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.639880 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:26:48 crc kubenswrapper[4689]: E0123 11:26:48.640640 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.656287 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.777047 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam\") pod \"a81f5043-068a-4164-a466-f867c148c637\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.777095 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czzdh\" (UniqueName: \"kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh\") pod \"a81f5043-068a-4164-a466-f867c148c637\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.777197 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory\") pod \"a81f5043-068a-4164-a466-f867c148c637\" (UID: \"a81f5043-068a-4164-a466-f867c148c637\") " Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.790436 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh" (OuterVolumeSpecName: "kube-api-access-czzdh") pod "a81f5043-068a-4164-a466-f867c148c637" (UID: "a81f5043-068a-4164-a466-f867c148c637"). InnerVolumeSpecName "kube-api-access-czzdh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.814529 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a81f5043-068a-4164-a466-f867c148c637" (UID: "a81f5043-068a-4164-a466-f867c148c637"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.815257 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory" (OuterVolumeSpecName: "inventory") pod "a81f5043-068a-4164-a466-f867c148c637" (UID: "a81f5043-068a-4164-a466-f867c148c637"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.880715 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.880758 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czzdh\" (UniqueName: \"kubernetes.io/projected/a81f5043-068a-4164-a466-f867c148c637-kube-api-access-czzdh\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:48 crc kubenswrapper[4689]: I0123 11:26:48.880773 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a81f5043-068a-4164-a466-f867c148c637-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.124028 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" event={"ID":"a81f5043-068a-4164-a466-f867c148c637","Type":"ContainerDied","Data":"3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717"} Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.124080 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d5d39a4fdb6929a1a45ec894cdb37de49385181ff90a575c989016eda0af717" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.124088 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-vx4zn" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.239697 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4"] Jan 23 11:26:49 crc kubenswrapper[4689]: E0123 11:26:49.240373 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a81f5043-068a-4164-a466-f867c148c637" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.240395 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="a81f5043-068a-4164-a466-f867c148c637" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.240636 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="a81f5043-068a-4164-a466-f867c148c637" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.241596 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.244423 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.245265 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.245265 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.245353 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.255662 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4"] Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.394132 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.394584 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wjvq\" (UniqueName: \"kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.395065 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.497253 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wjvq\" (UniqueName: \"kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.497755 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.497880 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.505500 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.508212 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.518487 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wjvq\" (UniqueName: \"kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:49 crc kubenswrapper[4689]: I0123 11:26:49.572854 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:26:50 crc kubenswrapper[4689]: I0123 11:26:50.111032 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4"] Jan 23 11:26:50 crc kubenswrapper[4689]: I0123 11:26:50.113287 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:26:50 crc kubenswrapper[4689]: I0123 11:26:50.133008 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" event={"ID":"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd","Type":"ContainerStarted","Data":"104c97b1f475d58482a65de853c766a5fddc104a18a74cb8f062be9334fae6a2"} Jan 23 11:26:52 crc kubenswrapper[4689]: I0123 11:26:52.154125 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" event={"ID":"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd","Type":"ContainerStarted","Data":"d6100581e8348a80c91b14ee156d6178994a88866684acd2456c3a1c3281de65"} Jan 23 11:26:52 crc kubenswrapper[4689]: I0123 11:26:52.183892 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" podStartSLOduration=2.163894139 podStartE2EDuration="3.183864424s" podCreationTimestamp="2026-01-23 11:26:49 +0000 UTC" firstStartedPulling="2026-01-23 11:26:50.113057196 +0000 UTC m=+2274.737737055" lastFinishedPulling="2026-01-23 11:26:51.133027461 +0000 UTC m=+2275.757707340" observedRunningTime="2026-01-23 11:26:52.172410749 +0000 UTC m=+2276.797090618" watchObservedRunningTime="2026-01-23 11:26:52.183864424 +0000 UTC m=+2276.808544303" Jan 23 11:27:03 crc kubenswrapper[4689]: I0123 
11:27:03.640098 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:27:03 crc kubenswrapper[4689]: E0123 11:27:03.641466 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:27:17 crc kubenswrapper[4689]: I0123 11:27:17.640194 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:27:17 crc kubenswrapper[4689]: E0123 11:27:17.641121 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:27:30 crc kubenswrapper[4689]: I0123 11:27:30.639870 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:27:30 crc kubenswrapper[4689]: E0123 11:27:30.640592 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:27:39 crc kubenswrapper[4689]: I0123 11:27:39.059964 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-pzpjk"] Jan 23 11:27:39 crc kubenswrapper[4689]: I0123 11:27:39.085460 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-pzpjk"] Jan 23 11:27:39 crc kubenswrapper[4689]: I0123 11:27:39.653646 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90127ad8-6a3a-402c-809f-d678f574fb09" path="/var/lib/kubelet/pods/90127ad8-6a3a-402c-809f-d678f574fb09/volumes" Jan 23 11:27:41 crc kubenswrapper[4689]: I0123 11:27:41.683869 4689 generic.go:334] "Generic (PLEG): container finished" podID="05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" containerID="d6100581e8348a80c91b14ee156d6178994a88866684acd2456c3a1c3281de65" exitCode=0 Jan 23 11:27:41 crc kubenswrapper[4689]: I0123 11:27:41.683939 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" event={"ID":"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd","Type":"ContainerDied","Data":"d6100581e8348a80c91b14ee156d6178994a88866684acd2456c3a1c3281de65"} Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.265675 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.305837 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wjvq\" (UniqueName: \"kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq\") pod \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.305934 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam\") pod \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.306141 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory\") pod \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\" (UID: \"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd\") " Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.313881 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq" (OuterVolumeSpecName: "kube-api-access-8wjvq") pod "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" (UID: "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd"). InnerVolumeSpecName "kube-api-access-8wjvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.337566 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory" (OuterVolumeSpecName: "inventory") pod "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" (UID: "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.362042 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" (UID: "05c5cfbc-c7bf-404d-84f3-d65bb01f34fd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.410289 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.410346 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wjvq\" (UniqueName: \"kubernetes.io/projected/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-kube-api-access-8wjvq\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.410370 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/05c5cfbc-c7bf-404d-84f3-d65bb01f34fd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.706139 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" event={"ID":"05c5cfbc-c7bf-404d-84f3-d65bb01f34fd","Type":"ContainerDied","Data":"104c97b1f475d58482a65de853c766a5fddc104a18a74cb8f062be9334fae6a2"} Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.706198 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="104c97b1f475d58482a65de853c766a5fddc104a18a74cb8f062be9334fae6a2" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.706224 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.793090 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8fp9h"] Jan 23 11:27:43 crc kubenswrapper[4689]: E0123 11:27:43.794108 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.794130 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.794445 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="05c5cfbc-c7bf-404d-84f3-d65bb01f34fd" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.795704 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.799919 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.800389 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.800592 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.802895 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.804339 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8fp9h"] Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.822852 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdflm\" (UniqueName: \"kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.823276 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.824515 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.927103 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.927595 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.927750 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdflm\" (UniqueName: \"kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc 
kubenswrapper[4689]: I0123 11:27:43.933032 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.939404 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:43 crc kubenswrapper[4689]: I0123 11:27:43.944400 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdflm\" (UniqueName: \"kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm\") pod \"ssh-known-hosts-edpm-deployment-8fp9h\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:44 crc kubenswrapper[4689]: I0123 11:27:44.141660 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:44 crc kubenswrapper[4689]: I0123 11:27:44.639647 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:27:44 crc kubenswrapper[4689]: E0123 11:27:44.640240 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:27:44 crc kubenswrapper[4689]: I0123 11:27:44.727547 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8fp9h"] Jan 23 11:27:45 crc kubenswrapper[4689]: I0123 11:27:45.727064 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" event={"ID":"162b7807-b14d-451f-8f58-e5bab6b5382c","Type":"ContainerStarted","Data":"f555e74761712acad3e8cf69cc9593c8c250280393fbedf9f1bc23e6bb5d770f"} Jan 23 11:27:45 crc kubenswrapper[4689]: I0123 11:27:45.727589 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" event={"ID":"162b7807-b14d-451f-8f58-e5bab6b5382c","Type":"ContainerStarted","Data":"434c51b09b8a51cabdcf74d5990673f789607d24e302155dba01fd36c76b9dbc"} Jan 23 11:27:45 crc kubenswrapper[4689]: I0123 11:27:45.752302 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" podStartSLOduration=2.280118358 podStartE2EDuration="2.752281023s" podCreationTimestamp="2026-01-23 11:27:43 +0000 UTC" firstStartedPulling="2026-01-23 11:27:44.722234847 +0000 UTC m=+2329.346914696" lastFinishedPulling="2026-01-23 11:27:45.194397502 +0000 UTC m=+2329.819077361" observedRunningTime="2026-01-23 11:27:45.74216565 +0000 UTC m=+2330.366845509" watchObservedRunningTime="2026-01-23 11:27:45.752281023 +0000 UTC m=+2330.376960892" Jan 23 11:27:52 crc 
kubenswrapper[4689]: I0123 11:27:52.795189 4689 generic.go:334] "Generic (PLEG): container finished" podID="162b7807-b14d-451f-8f58-e5bab6b5382c" containerID="f555e74761712acad3e8cf69cc9593c8c250280393fbedf9f1bc23e6bb5d770f" exitCode=0 Jan 23 11:27:52 crc kubenswrapper[4689]: I0123 11:27:52.795746 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" event={"ID":"162b7807-b14d-451f-8f58-e5bab6b5382c","Type":"ContainerDied","Data":"f555e74761712acad3e8cf69cc9593c8c250280393fbedf9f1bc23e6bb5d770f"} Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.277810 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.409914 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam\") pod \"162b7807-b14d-451f-8f58-e5bab6b5382c\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.410078 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0\") pod \"162b7807-b14d-451f-8f58-e5bab6b5382c\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.410605 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdflm\" (UniqueName: \"kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm\") pod \"162b7807-b14d-451f-8f58-e5bab6b5382c\" (UID: \"162b7807-b14d-451f-8f58-e5bab6b5382c\") " Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.425636 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm" (OuterVolumeSpecName: "kube-api-access-cdflm") pod "162b7807-b14d-451f-8f58-e5bab6b5382c" (UID: "162b7807-b14d-451f-8f58-e5bab6b5382c"). InnerVolumeSpecName "kube-api-access-cdflm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.449457 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "162b7807-b14d-451f-8f58-e5bab6b5382c" (UID: "162b7807-b14d-451f-8f58-e5bab6b5382c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.454542 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "162b7807-b14d-451f-8f58-e5bab6b5382c" (UID: "162b7807-b14d-451f-8f58-e5bab6b5382c"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.523684 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdflm\" (UniqueName: \"kubernetes.io/projected/162b7807-b14d-451f-8f58-e5bab6b5382c-kube-api-access-cdflm\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.523773 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.523832 4689 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/162b7807-b14d-451f-8f58-e5bab6b5382c-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.816315 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" event={"ID":"162b7807-b14d-451f-8f58-e5bab6b5382c","Type":"ContainerDied","Data":"434c51b09b8a51cabdcf74d5990673f789607d24e302155dba01fd36c76b9dbc"} Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.816361 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="434c51b09b8a51cabdcf74d5990673f789607d24e302155dba01fd36c76b9dbc" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.816449 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8fp9h" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.912946 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8"] Jan 23 11:27:54 crc kubenswrapper[4689]: E0123 11:27:54.913530 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162b7807-b14d-451f-8f58-e5bab6b5382c" containerName="ssh-known-hosts-edpm-deployment" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.913550 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="162b7807-b14d-451f-8f58-e5bab6b5382c" containerName="ssh-known-hosts-edpm-deployment" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.913830 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="162b7807-b14d-451f-8f58-e5bab6b5382c" containerName="ssh-known-hosts-edpm-deployment" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.914870 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.927302 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.927342 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.927594 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.927681 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:27:54 crc kubenswrapper[4689]: I0123 11:27:54.933275 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8"] Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.037037 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmkh4\" (UniqueName: \"kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.037112 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.037245 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.140133 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmkh4\" (UniqueName: \"kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.140244 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.140315 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.145938 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.148795 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.162633 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmkh4\" (UniqueName: \"kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lwxl8\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.242517 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.647866 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:27:55 crc kubenswrapper[4689]: E0123 11:27:55.648518 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.797808 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8"] Jan 23 11:27:55 crc kubenswrapper[4689]: I0123 11:27:55.829638 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" event={"ID":"4ea2f795-bc17-496a-af2a-934d59f8aa81","Type":"ContainerStarted","Data":"269b071d2efc7b95f67dd84d1913f271b6e4431180ad887b8a99fb9be57430df"} Jan 23 11:27:56 crc kubenswrapper[4689]: I0123 11:27:56.296014 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:27:56 crc kubenswrapper[4689]: I0123 11:27:56.842079 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" event={"ID":"4ea2f795-bc17-496a-af2a-934d59f8aa81","Type":"ContainerStarted","Data":"bac34f8e1308799859ef19d38146ec3cc42ca7ea4dd46923823b6f09decd3ae4"} Jan 23 11:27:56 crc kubenswrapper[4689]: I0123 11:27:56.873830 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" podStartSLOduration=2.384166354 podStartE2EDuration="2.873812725s" podCreationTimestamp="2026-01-23 11:27:54 +0000 UTC" firstStartedPulling="2026-01-23 11:27:55.80286172 +0000 UTC m=+2340.427541579" lastFinishedPulling="2026-01-23 11:27:56.292508071 +0000 UTC m=+2340.917187950" observedRunningTime="2026-01-23 11:27:56.863740224 +0000 UTC m=+2341.488420083" watchObservedRunningTime="2026-01-23 11:27:56.873812725 +0000 UTC m=+2341.498492574" Jan 23 11:28:04 crc kubenswrapper[4689]: I0123 11:28:04.957705 4689 generic.go:334] "Generic (PLEG): container finished" podID="4ea2f795-bc17-496a-af2a-934d59f8aa81" containerID="bac34f8e1308799859ef19d38146ec3cc42ca7ea4dd46923823b6f09decd3ae4" exitCode=0 Jan 23 11:28:04 crc kubenswrapper[4689]: I0123 11:28:04.957790 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" event={"ID":"4ea2f795-bc17-496a-af2a-934d59f8aa81","Type":"ContainerDied","Data":"bac34f8e1308799859ef19d38146ec3cc42ca7ea4dd46923823b6f09decd3ae4"} Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.434550 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.564073 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam\") pod \"4ea2f795-bc17-496a-af2a-934d59f8aa81\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.564506 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory\") pod \"4ea2f795-bc17-496a-af2a-934d59f8aa81\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.564735 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmkh4\" (UniqueName: \"kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4\") pod \"4ea2f795-bc17-496a-af2a-934d59f8aa81\" (UID: \"4ea2f795-bc17-496a-af2a-934d59f8aa81\") " Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.569739 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4" (OuterVolumeSpecName: "kube-api-access-hmkh4") pod "4ea2f795-bc17-496a-af2a-934d59f8aa81" (UID: "4ea2f795-bc17-496a-af2a-934d59f8aa81"). InnerVolumeSpecName "kube-api-access-hmkh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.595918 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4ea2f795-bc17-496a-af2a-934d59f8aa81" (UID: "4ea2f795-bc17-496a-af2a-934d59f8aa81"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.613642 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory" (OuterVolumeSpecName: "inventory") pod "4ea2f795-bc17-496a-af2a-934d59f8aa81" (UID: "4ea2f795-bc17-496a-af2a-934d59f8aa81"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.668320 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.668358 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ea2f795-bc17-496a-af2a-934d59f8aa81-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.668373 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmkh4\" (UniqueName: \"kubernetes.io/projected/4ea2f795-bc17-496a-af2a-934d59f8aa81-kube-api-access-hmkh4\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.984796 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" event={"ID":"4ea2f795-bc17-496a-af2a-934d59f8aa81","Type":"ContainerDied","Data":"269b071d2efc7b95f67dd84d1913f271b6e4431180ad887b8a99fb9be57430df"} Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.984845 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="269b071d2efc7b95f67dd84d1913f271b6e4431180ad887b8a99fb9be57430df" Jan 23 11:28:06 crc kubenswrapper[4689]: I0123 11:28:06.984951 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lwxl8" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.061417 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb"] Jan 23 11:28:07 crc kubenswrapper[4689]: E0123 11:28:07.062003 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2f795-bc17-496a-af2a-934d59f8aa81" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.062025 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2f795-bc17-496a-af2a-934d59f8aa81" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.062295 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea2f795-bc17-496a-af2a-934d59f8aa81" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.063199 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.065209 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.065590 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.065590 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.067947 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.083439 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb"] Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.186747 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gfm4\" (UniqueName: \"kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.186987 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.187035 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.288679 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.288746 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.288903 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gfm4\" (UniqueName: \"kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.296862 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.308996 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.325106 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gfm4\" (UniqueName: \"kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.404619 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.946294 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb"] Jan 23 11:28:07 crc kubenswrapper[4689]: W0123 11:28:07.951362 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98b02990_c192_41b3_88a5_556560831704.slice/crio-5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d WatchSource:0}: Error finding container 5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d: Status 404 returned error can't find the container with id 5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d Jan 23 11:28:07 crc kubenswrapper[4689]: I0123 11:28:07.995744 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" event={"ID":"98b02990-c192-41b3-88a5-556560831704","Type":"ContainerStarted","Data":"5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d"} Jan 23 11:28:09 crc kubenswrapper[4689]: I0123 11:28:09.006995 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" event={"ID":"98b02990-c192-41b3-88a5-556560831704","Type":"ContainerStarted","Data":"2ebf2e2158ffa368e4ff680a2f7a03cf993c00521442896d3d4605eab40970a0"} Jan 23 11:28:09 crc kubenswrapper[4689]: I0123 11:28:09.029663 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" podStartSLOduration=1.569682236 podStartE2EDuration="2.029643568s" podCreationTimestamp="2026-01-23 11:28:07 +0000 UTC" firstStartedPulling="2026-01-23 11:28:07.957822151 +0000 UTC m=+2352.582502010" lastFinishedPulling="2026-01-23 11:28:08.417783463 +0000 UTC 
m=+2353.042463342" observedRunningTime="2026-01-23 11:28:09.021676399 +0000 UTC m=+2353.646356258" watchObservedRunningTime="2026-01-23 11:28:09.029643568 +0000 UTC m=+2353.654323437" Jan 23 11:28:10 crc kubenswrapper[4689]: I0123 11:28:10.640537 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:28:10 crc kubenswrapper[4689]: E0123 11:28:10.641456 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:28:19 crc kubenswrapper[4689]: I0123 11:28:19.134758 4689 generic.go:334] "Generic (PLEG): container finished" podID="98b02990-c192-41b3-88a5-556560831704" containerID="2ebf2e2158ffa368e4ff680a2f7a03cf993c00521442896d3d4605eab40970a0" exitCode=0 Jan 23 11:28:19 crc kubenswrapper[4689]: I0123 11:28:19.134850 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" event={"ID":"98b02990-c192-41b3-88a5-556560831704","Type":"ContainerDied","Data":"2ebf2e2158ffa368e4ff680a2f7a03cf993c00521442896d3d4605eab40970a0"} Jan 23 11:28:19 crc kubenswrapper[4689]: I0123 11:28:19.370960 4689 scope.go:117] "RemoveContainer" containerID="1c8f7b9502409880289e9b7dcca2477d7d5efda1d5de926f2f8394e0fc5baace" Jan 23 11:28:23 crc kubenswrapper[4689]: I0123 11:28:23.641255 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:28:23 crc kubenswrapper[4689]: E0123 11:28:23.642663 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.581227 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.733731 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gfm4\" (UniqueName: \"kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4\") pod \"98b02990-c192-41b3-88a5-556560831704\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.734099 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory\") pod \"98b02990-c192-41b3-88a5-556560831704\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.735016 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam\") pod \"98b02990-c192-41b3-88a5-556560831704\" (UID: \"98b02990-c192-41b3-88a5-556560831704\") " Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.743486 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4" (OuterVolumeSpecName: "kube-api-access-2gfm4") pod "98b02990-c192-41b3-88a5-556560831704" (UID: "98b02990-c192-41b3-88a5-556560831704"). InnerVolumeSpecName "kube-api-access-2gfm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.768029 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory" (OuterVolumeSpecName: "inventory") pod "98b02990-c192-41b3-88a5-556560831704" (UID: "98b02990-c192-41b3-88a5-556560831704"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.775233 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "98b02990-c192-41b3-88a5-556560831704" (UID: "98b02990-c192-41b3-88a5-556560831704"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.838383 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gfm4\" (UniqueName: \"kubernetes.io/projected/98b02990-c192-41b3-88a5-556560831704-kube-api-access-2gfm4\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.838424 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:28 crc kubenswrapper[4689]: I0123 11:28:28.838434 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/98b02990-c192-41b3-88a5-556560831704-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.499795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" event={"ID":"98b02990-c192-41b3-88a5-556560831704","Type":"ContainerDied","Data":"5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d"} Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.499844 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5385adf285c306162ac6b0f8341339eeb6641811b93b52ced23c68e3262d743d" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.499947 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.705297 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4"] Jan 23 11:28:29 crc kubenswrapper[4689]: E0123 11:28:29.705881 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98b02990-c192-41b3-88a5-556560831704" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.705895 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="98b02990-c192-41b3-88a5-556560831704" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.706132 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="98b02990-c192-41b3-88a5-556560831704" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.707037 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.709813 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.709900 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.711388 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.711625 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.712880 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.713076 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.713133 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.713244 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.714594 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.717826 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4"] Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.891072 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.893825 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.893964 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtlr8\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc 
kubenswrapper[4689]: I0123 11:28:29.894027 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894130 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894227 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894271 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894345 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894407 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894461 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894515 4689 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894581 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894705 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894816 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894877 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.894949 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996764 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996820 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996856 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996906 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996942 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.996970 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997002 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997046 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997310 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 
11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997758 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997815 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997855 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997895 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997934 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.997990 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtlr8\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:29 crc kubenswrapper[4689]: I0123 11:28:29.998016 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.002081 4689 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.002551 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.002862 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.004303 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.004547 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.004662 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.004683 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.004635 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.005083 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.005349 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.006646 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.007323 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.011266 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.016798 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.017047 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 
11:28:30.019425 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtlr8\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.039575 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:28:30 crc kubenswrapper[4689]: I0123 11:28:30.613252 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4"] Jan 23 11:28:31 crc kubenswrapper[4689]: I0123 11:28:31.518471 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" event={"ID":"7a8f484d-8ae2-4eb5-873c-8051270d53ea","Type":"ContainerStarted","Data":"16ac127e5565456984fbbac0f70ee76c5d46e8cb57fd73c5b8baaa04f9186973"} Jan 23 11:28:35 crc kubenswrapper[4689]: I0123 11:28:35.581200 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" event={"ID":"7a8f484d-8ae2-4eb5-873c-8051270d53ea","Type":"ContainerStarted","Data":"31995f65fbe5c6106c830fa798c2fed892278b09e075ea72e83f6b14bd9a9e5c"} Jan 23 11:28:35 crc kubenswrapper[4689]: I0123 11:28:35.601801 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" podStartSLOduration=2.39464602 podStartE2EDuration="6.601780948s" podCreationTimestamp="2026-01-23 11:28:29 +0000 UTC" firstStartedPulling="2026-01-23 11:28:30.620979232 +0000 UTC m=+2375.245659091" lastFinishedPulling="2026-01-23 11:28:34.82811414 +0000 UTC m=+2379.452794019" observedRunningTime="2026-01-23 11:28:35.599238314 +0000 UTC m=+2380.223918183" watchObservedRunningTime="2026-01-23 11:28:35.601780948 +0000 UTC m=+2380.226460807" Jan 23 11:28:35 crc kubenswrapper[4689]: I0123 11:28:35.649521 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:28:35 crc kubenswrapper[4689]: E0123 11:28:35.649820 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:28:45 crc kubenswrapper[4689]: I0123 11:28:45.044398 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-fpbbp"] Jan 23 11:28:45 crc kubenswrapper[4689]: I0123 11:28:45.059010 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-fpbbp"] Jan 23 11:28:45 crc kubenswrapper[4689]: I0123 11:28:45.655393 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30e7aa97-9923-4441-9a8a-5f320f5f3b85" path="/var/lib/kubelet/pods/30e7aa97-9923-4441-9a8a-5f320f5f3b85/volumes" Jan 23 11:28:50 crc kubenswrapper[4689]: I0123 11:28:50.641543 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 
11:28:50 crc kubenswrapper[4689]: E0123 11:28:50.642878 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:29:05 crc kubenswrapper[4689]: I0123 11:29:05.640352 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:29:05 crc kubenswrapper[4689]: E0123 11:29:05.641332 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:29:16 crc kubenswrapper[4689]: I0123 11:29:16.640729 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:29:16 crc kubenswrapper[4689]: E0123 11:29:16.641662 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:29:23 crc kubenswrapper[4689]: I0123 11:29:23.114026 4689 generic.go:334] "Generic (PLEG): container finished" podID="7a8f484d-8ae2-4eb5-873c-8051270d53ea" containerID="31995f65fbe5c6106c830fa798c2fed892278b09e075ea72e83f6b14bd9a9e5c" exitCode=0 Jan 23 11:29:23 crc kubenswrapper[4689]: I0123 11:29:23.114118 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" event={"ID":"7a8f484d-8ae2-4eb5-873c-8051270d53ea","Type":"ContainerDied","Data":"31995f65fbe5c6106c830fa798c2fed892278b09e075ea72e83f6b14bd9a9e5c"} Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.676194 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.873971 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtlr8\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.874072 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.874567 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.875061 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.875144 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.875683 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.875735 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.875923 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876066 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle\") pod 
\"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876530 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876588 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876673 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876717 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876782 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876830 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.876894 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle\") pod \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\" (UID: \"7a8f484d-8ae2-4eb5-873c-8051270d53ea\") " Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.880046 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.882140 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.882591 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.882608 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8" (OuterVolumeSpecName: "kube-api-access-xtlr8") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "kube-api-access-xtlr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.882767 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.884968 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.885106 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.885471 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.885502 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.885644 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.886472 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.886971 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.887384 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.887936 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.910377 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory" (OuterVolumeSpecName: "inventory") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.915219 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7a8f484d-8ae2-4eb5-873c-8051270d53ea" (UID: "7a8f484d-8ae2-4eb5-873c-8051270d53ea"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980317 4689 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980367 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980381 4689 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980392 4689 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980410 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980424 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980438 4689 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980449 4689 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980459 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtlr8\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-kube-api-access-xtlr8\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980469 4689 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-repo-setup-combined-ca-bundle\") on node 
\"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980482 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980494 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980506 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7a8f484d-8ae2-4eb5-873c-8051270d53ea-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980517 4689 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980527 4689 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:24 crc kubenswrapper[4689]: I0123 11:29:24.980538 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a8f484d-8ae2-4eb5-873c-8051270d53ea-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.137271 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" event={"ID":"7a8f484d-8ae2-4eb5-873c-8051270d53ea","Type":"ContainerDied","Data":"16ac127e5565456984fbbac0f70ee76c5d46e8cb57fd73c5b8baaa04f9186973"} Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.137326 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16ac127e5565456984fbbac0f70ee76c5d46e8cb57fd73c5b8baaa04f9186973" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.137644 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.250675 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k"] Jan 23 11:29:25 crc kubenswrapper[4689]: E0123 11:29:25.251165 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a8f484d-8ae2-4eb5-873c-8051270d53ea" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.251182 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a8f484d-8ae2-4eb5-873c-8051270d53ea" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.251389 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a8f484d-8ae2-4eb5-873c-8051270d53ea" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.252204 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.258125 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.258434 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.258258 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.259923 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.260103 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.291326 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.291720 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.301436 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.302095 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-bqccv\" (UniqueName: \"kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.307570 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.293792 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k"] Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.411237 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqccv\" (UniqueName: \"kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.411348 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.411417 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.411497 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.411541 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.412522 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.416648 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.418763 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.421585 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.428306 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqccv\" (UniqueName: \"kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-pwj4k\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:25 crc kubenswrapper[4689]: I0123 11:29:25.575908 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:29:26 crc kubenswrapper[4689]: I0123 11:29:26.027475 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k"] Jan 23 11:29:26 crc kubenswrapper[4689]: I0123 11:29:26.157925 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" event={"ID":"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0","Type":"ContainerStarted","Data":"af8f581c8ebdbf3a925e7f96ecfdeff74b8137feee6539cee8b3df20f66fbbae"} Jan 23 11:29:27 crc kubenswrapper[4689]: I0123 11:29:27.170802 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" event={"ID":"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0","Type":"ContainerStarted","Data":"b0edbcf3fde706b7b992fe386a2dc71525b938bcf2e7a919f13f1077481a3745"} Jan 23 11:29:27 crc kubenswrapper[4689]: I0123 11:29:27.200529 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" podStartSLOduration=1.6505852349999999 podStartE2EDuration="2.200492313s" podCreationTimestamp="2026-01-23 11:29:25 +0000 UTC" firstStartedPulling="2026-01-23 11:29:26.024343243 +0000 UTC m=+2430.649023112" lastFinishedPulling="2026-01-23 11:29:26.574250331 +0000 UTC m=+2431.198930190" observedRunningTime="2026-01-23 11:29:27.190974866 +0000 UTC m=+2431.815654735" watchObservedRunningTime="2026-01-23 11:29:27.200492313 +0000 UTC m=+2431.825172172" Jan 23 11:29:27 crc kubenswrapper[4689]: I0123 11:29:27.640141 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:29:27 crc kubenswrapper[4689]: E0123 11:29:27.640782 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:29:28 crc kubenswrapper[4689]: I0123 11:29:28.618064 4689 scope.go:117] "RemoveContainer" containerID="92444a0342b62ea610a7f32f254b21f34bf6d604c290564fc653d7afce9f5f14" Jan 23 11:29:39 crc kubenswrapper[4689]: I0123 11:29:39.640175 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:29:39 crc kubenswrapper[4689]: E0123 11:29:39.641248 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:29:54 crc kubenswrapper[4689]: I0123 11:29:54.640317 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:29:54 crc kubenswrapper[4689]: E0123 11:29:54.641319 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.156851 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg"] Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.159810 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.162112 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.163406 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.186297 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg"] Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.337172 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.337291 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.337417 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q79zg\" (UniqueName: \"kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.439392 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.439493 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.439610 4689 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-q79zg\" (UniqueName: \"kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.440549 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.446057 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.458515 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q79zg\" (UniqueName: \"kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg\") pod \"collect-profiles-29486130-hbqgg\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:00 crc kubenswrapper[4689]: I0123 11:30:00.480947 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:01 crc kubenswrapper[4689]: I0123 11:30:01.033828 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg"] Jan 23 11:30:01 crc kubenswrapper[4689]: I0123 11:30:01.552323 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" event={"ID":"1976a333-0b40-42b5-af0b-f817b46cbbc3","Type":"ContainerStarted","Data":"3bd8f63a66c771a5479f7167a52d15bfef12eeccaf2dcf8b53ca21f361d00fe4"} Jan 23 11:30:01 crc kubenswrapper[4689]: I0123 11:30:01.552656 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" event={"ID":"1976a333-0b40-42b5-af0b-f817b46cbbc3","Type":"ContainerStarted","Data":"9b5e31f582cf45d26e376469bf6334105ec0bb1d048df814835138e99befc287"} Jan 23 11:30:01 crc kubenswrapper[4689]: I0123 11:30:01.568348 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" podStartSLOduration=1.5683299229999998 podStartE2EDuration="1.568329923s" podCreationTimestamp="2026-01-23 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:30:01.567176287 +0000 UTC m=+2466.191856146" watchObservedRunningTime="2026-01-23 11:30:01.568329923 +0000 UTC m=+2466.193009782" Jan 23 11:30:02 crc kubenswrapper[4689]: I0123 11:30:02.571763 4689 generic.go:334] "Generic (PLEG): container finished" podID="1976a333-0b40-42b5-af0b-f817b46cbbc3" containerID="3bd8f63a66c771a5479f7167a52d15bfef12eeccaf2dcf8b53ca21f361d00fe4" exitCode=0 Jan 23 11:30:02 crc kubenswrapper[4689]: 
I0123 11:30:02.571856 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" event={"ID":"1976a333-0b40-42b5-af0b-f817b46cbbc3","Type":"ContainerDied","Data":"3bd8f63a66c771a5479f7167a52d15bfef12eeccaf2dcf8b53ca21f361d00fe4"} Jan 23 11:30:03 crc kubenswrapper[4689]: I0123 11:30:03.965657 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.072948 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume\") pod \"1976a333-0b40-42b5-af0b-f817b46cbbc3\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.073356 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q79zg\" (UniqueName: \"kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg\") pod \"1976a333-0b40-42b5-af0b-f817b46cbbc3\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.073549 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume\") pod \"1976a333-0b40-42b5-af0b-f817b46cbbc3\" (UID: \"1976a333-0b40-42b5-af0b-f817b46cbbc3\") " Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.074218 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume" (OuterVolumeSpecName: "config-volume") pod "1976a333-0b40-42b5-af0b-f817b46cbbc3" (UID: "1976a333-0b40-42b5-af0b-f817b46cbbc3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.074617 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1976a333-0b40-42b5-af0b-f817b46cbbc3-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.080526 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1976a333-0b40-42b5-af0b-f817b46cbbc3" (UID: "1976a333-0b40-42b5-af0b-f817b46cbbc3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.089449 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg" (OuterVolumeSpecName: "kube-api-access-q79zg") pod "1976a333-0b40-42b5-af0b-f817b46cbbc3" (UID: "1976a333-0b40-42b5-af0b-f817b46cbbc3"). InnerVolumeSpecName "kube-api-access-q79zg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.176219 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1976a333-0b40-42b5-af0b-f817b46cbbc3-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.176525 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q79zg\" (UniqueName: \"kubernetes.io/projected/1976a333-0b40-42b5-af0b-f817b46cbbc3-kube-api-access-q79zg\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.604684 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" event={"ID":"1976a333-0b40-42b5-af0b-f817b46cbbc3","Type":"ContainerDied","Data":"9b5e31f582cf45d26e376469bf6334105ec0bb1d048df814835138e99befc287"} Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.604768 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.604800 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b5e31f582cf45d26e376469bf6334105ec0bb1d048df814835138e99befc287" Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.653084 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"] Jan 23 11:30:04 crc kubenswrapper[4689]: I0123 11:30:04.664386 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486085-kzpnx"] Jan 23 11:30:05 crc kubenswrapper[4689]: I0123 11:30:05.673473 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c83ad550-7576-440f-bac5-8308b6c801b0" path="/var/lib/kubelet/pods/c83ad550-7576-440f-bac5-8308b6c801b0/volumes" Jan 23 11:30:06 crc kubenswrapper[4689]: I0123 11:30:06.640859 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:30:06 crc kubenswrapper[4689]: E0123 11:30:06.641346 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:30:21 crc kubenswrapper[4689]: I0123 11:30:21.640769 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:30:21 crc kubenswrapper[4689]: E0123 11:30:21.641639 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:30:21 crc kubenswrapper[4689]: I0123 11:30:21.773403 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" 
podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 11:30:28 crc kubenswrapper[4689]: I0123 11:30:28.687131 4689 scope.go:117] "RemoveContainer" containerID="9150f9d267f2d3d18e29021e4b3549dfc8d5cdc74e9f7f595819631794282cf0" Jan 23 11:30:32 crc kubenswrapper[4689]: I0123 11:30:32.640582 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:30:32 crc kubenswrapper[4689]: E0123 11:30:32.641400 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:30:33 crc kubenswrapper[4689]: I0123 11:30:33.910283 4689 generic.go:334] "Generic (PLEG): container finished" podID="cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" containerID="b0edbcf3fde706b7b992fe386a2dc71525b938bcf2e7a919f13f1077481a3745" exitCode=0 Jan 23 11:30:33 crc kubenswrapper[4689]: I0123 11:30:33.910369 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" event={"ID":"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0","Type":"ContainerDied","Data":"b0edbcf3fde706b7b992fe386a2dc71525b938bcf2e7a919f13f1077481a3745"} Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.388283 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.479038 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqccv\" (UniqueName: \"kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv\") pod \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.479499 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0\") pod \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.479744 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory\") pod \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.479969 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle\") pod \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " Jan 23 11:30:35 crc kubenswrapper[4689]: I0123 11:30:35.480075 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam\") pod \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\" (UID: \"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0\") " Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.464524 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv" (OuterVolumeSpecName: "kube-api-access-bqccv") pod "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" (UID: "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0"). InnerVolumeSpecName "kube-api-access-bqccv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.468283 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqccv\" (UniqueName: \"kubernetes.io/projected/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-kube-api-access-bqccv\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.474418 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" (UID: "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.475579 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" (UID: "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.489394 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" (UID: "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.501386 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory" (OuterVolumeSpecName: "inventory") pod "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" (UID: "cf6b169f-f9ae-4ce5-9a76-b98b00912ea0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.539793 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.543868 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-pwj4k" event={"ID":"cf6b169f-f9ae-4ce5-9a76-b98b00912ea0","Type":"ContainerDied","Data":"af8f581c8ebdbf3a925e7f96ecfdeff74b8137feee6539cee8b3df20f66fbbae"} Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.543909 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af8f581c8ebdbf3a925e7f96ecfdeff74b8137feee6539cee8b3df20f66fbbae" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.575469 4689 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.575497 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.575508 4689 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.575516 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf6b169f-f9ae-4ce5-9a76-b98b00912ea0-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.593954 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5"] Jan 23 11:30:36 crc kubenswrapper[4689]: E0123 11:30:36.594502 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.594521 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 23 11:30:36 crc kubenswrapper[4689]: E0123 11:30:36.594571 4689 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="1976a333-0b40-42b5-af0b-f817b46cbbc3" containerName="collect-profiles" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.594581 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1976a333-0b40-42b5-af0b-f817b46cbbc3" containerName="collect-profiles" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.594867 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="1976a333-0b40-42b5-af0b-f817b46cbbc3" containerName="collect-profiles" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.594900 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf6b169f-f9ae-4ce5-9a76-b98b00912ea0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.595876 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.599985 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.600004 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.600239 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.600250 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.600424 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.600474 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.605878 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5"] Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678291 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678351 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678583 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sp7p\" (UniqueName: \"kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678856 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678896 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.678948 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781116 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sp7p\" (UniqueName: \"kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781302 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781330 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781357 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781459 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.781485 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.786499 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.786961 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.787230 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.787550 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.791633 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.801318 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sp7p\" (UniqueName: 
\"kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:36 crc kubenswrapper[4689]: I0123 11:30:36.932264 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:30:38 crc kubenswrapper[4689]: I0123 11:30:38.190508 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5"] Jan 23 11:30:38 crc kubenswrapper[4689]: I0123 11:30:38.568103 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" event={"ID":"5fa207b4-af47-4f97-976c-8d6ac264443e","Type":"ContainerStarted","Data":"8fa25450471ec3bbea3ccadce6bcb8622d9739fe7542191e308f091bbae29b7a"} Jan 23 11:30:44 crc kubenswrapper[4689]: I0123 11:30:44.640723 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" event={"ID":"5fa207b4-af47-4f97-976c-8d6ac264443e","Type":"ContainerStarted","Data":"f4271adf6ba9f88e78a456da06e081855bdbbdba0a45e05c91dc81b26391037c"} Jan 23 11:30:44 crc kubenswrapper[4689]: I0123 11:30:44.665437 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" podStartSLOduration=3.850358101 podStartE2EDuration="8.665415773s" podCreationTimestamp="2026-01-23 11:30:36 +0000 UTC" firstStartedPulling="2026-01-23 11:30:38.189412769 +0000 UTC m=+2502.814092648" lastFinishedPulling="2026-01-23 11:30:43.004470421 +0000 UTC m=+2507.629150320" observedRunningTime="2026-01-23 11:30:44.661659773 +0000 UTC m=+2509.286339632" watchObservedRunningTime="2026-01-23 11:30:44.665415773 +0000 UTC m=+2509.290095642" Jan 23 11:30:45 crc kubenswrapper[4689]: I0123 11:30:45.648298 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f" Jan 23 11:30:47 crc kubenswrapper[4689]: I0123 11:30:47.712866 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90"} Jan 23 11:31:06 crc kubenswrapper[4689]: I0123 11:31:06.615538 4689 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podcf6b169f-f9ae-4ce5-9a76-b98b00912ea0"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podcf6b169f-f9ae-4ce5-9a76-b98b00912ea0] : Timed out while waiting for systemd to remove kubepods-besteffort-podcf6b169f_f9ae_4ce5_9a76_b98b00912ea0.slice" Jan 23 11:31:28 crc kubenswrapper[4689]: I0123 11:31:28.904179 4689 scope.go:117] "RemoveContainer" containerID="47a4d541c82d2ff52d041abc564fa46f580ba1edc445fd6ff4e8b948b3c68d1a" Jan 23 11:31:28 crc kubenswrapper[4689]: I0123 11:31:28.927845 4689 scope.go:117] "RemoveContainer" containerID="f5ac6691fd688025c3c73d0c109c4572e6d4faa6aa81e60a5aef470d69fc214b" Jan 23 11:31:28 crc kubenswrapper[4689]: I0123 11:31:28.952601 4689 scope.go:117] "RemoveContainer" containerID="606c4a6d4a750e86f7222ae55b12b08864d206b668f0a9099d92b8bb22bdae41" Jan 23 11:31:31 crc 
kubenswrapper[4689]: I0123 11:31:31.204317 4689 generic.go:334] "Generic (PLEG): container finished" podID="5fa207b4-af47-4f97-976c-8d6ac264443e" containerID="f4271adf6ba9f88e78a456da06e081855bdbbdba0a45e05c91dc81b26391037c" exitCode=0 Jan 23 11:31:31 crc kubenswrapper[4689]: I0123 11:31:31.204445 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" event={"ID":"5fa207b4-af47-4f97-976c-8d6ac264443e","Type":"ContainerDied","Data":"f4271adf6ba9f88e78a456da06e081855bdbbdba0a45e05c91dc81b26391037c"} Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.787991 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.859746 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sp7p\" (UniqueName: \"kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.859872 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.860019 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.860134 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.860190 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.860242 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle\") pod \"5fa207b4-af47-4f97-976c-8d6ac264443e\" (UID: \"5fa207b4-af47-4f97-976c-8d6ac264443e\") " Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.866556 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.869310 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p" (OuterVolumeSpecName: "kube-api-access-2sp7p") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "kube-api-access-2sp7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.891578 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.893125 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.902833 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.907836 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory" (OuterVolumeSpecName: "inventory") pod "5fa207b4-af47-4f97-976c-8d6ac264443e" (UID: "5fa207b4-af47-4f97-976c-8d6ac264443e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962720 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962752 4689 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962770 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962787 4689 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962803 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sp7p\" (UniqueName: \"kubernetes.io/projected/5fa207b4-af47-4f97-976c-8d6ac264443e-kube-api-access-2sp7p\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:32 crc kubenswrapper[4689]: I0123 11:31:32.962816 4689 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5fa207b4-af47-4f97-976c-8d6ac264443e-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.227592 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" event={"ID":"5fa207b4-af47-4f97-976c-8d6ac264443e","Type":"ContainerDied","Data":"8fa25450471ec3bbea3ccadce6bcb8622d9739fe7542191e308f091bbae29b7a"} Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.227636 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fa25450471ec3bbea3ccadce6bcb8622d9739fe7542191e308f091bbae29b7a" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.227675 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.320823 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz"] Jan 23 11:31:33 crc kubenswrapper[4689]: E0123 11:31:33.321400 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fa207b4-af47-4f97-976c-8d6ac264443e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.321424 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fa207b4-af47-4f97-976c-8d6ac264443e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.321710 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fa207b4-af47-4f97-976c-8d6ac264443e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.322500 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.330986 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.331266 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.331749 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.336361 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.336575 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.336916 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz"] Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.371588 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.371931 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.372034 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfcwl\" (UniqueName: \"kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" 
(UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.372107 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.372132 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.473607 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.473659 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.473794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.473837 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.473971 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfcwl\" (UniqueName: \"kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.479966 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: 
\"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.480669 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.483995 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.489465 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.495780 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfcwl\" (UniqueName: \"kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:33 crc kubenswrapper[4689]: I0123 11:31:33.640438 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" Jan 23 11:31:34 crc kubenswrapper[4689]: I0123 11:31:34.239676 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz"] Jan 23 11:31:35 crc kubenswrapper[4689]: I0123 11:31:35.253125 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" event={"ID":"e5b31e85-298a-4959-b5d0-87adb59850b6","Type":"ContainerStarted","Data":"12fd67747c91199f1a3f6d6f4cd335ca2d8575f1369013ba53b048ad932abc49"} Jan 23 11:31:38 crc kubenswrapper[4689]: I0123 11:31:38.286427 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" event={"ID":"e5b31e85-298a-4959-b5d0-87adb59850b6","Type":"ContainerStarted","Data":"8ff941691c4f01a5f2503a0c2821c630ce2de5b40681a6167b5aad752dc1749e"} Jan 23 11:31:38 crc kubenswrapper[4689]: I0123 11:31:38.316500 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" podStartSLOduration=2.642645311 podStartE2EDuration="5.316476034s" podCreationTimestamp="2026-01-23 11:31:33 +0000 UTC" firstStartedPulling="2026-01-23 11:31:34.256296639 +0000 UTC m=+2558.880976498" lastFinishedPulling="2026-01-23 11:31:36.930127352 +0000 UTC m=+2561.554807221" observedRunningTime="2026-01-23 11:31:38.302179733 +0000 UTC m=+2562.926859602" watchObservedRunningTime="2026-01-23 11:31:38.316476034 +0000 UTC m=+2562.941155903" Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.519824 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"] Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.523406 4689 util.go:30] "No sandbox for pod can be found. 
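The startup-duration entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (11:31:38.316476034 - 11:31:33 = 5.316476034s), and podStartSLOduration appears to be that figure minus the time spent pulling images, measured on the monotonic clock (the m=+... readings). A minimal Go sketch checking the arithmetic, using only values copied from the log line:

package main

import "fmt"

func main() {
	const (
		firstStartedPulling = 2558.880976498 // m=+ reading from the log entry
		lastFinishedPulling = 2561.554807221 // m=+ reading from the log entry
		e2e                 = 5.316476034    // podStartE2EDuration in seconds
	)
	pull := lastFinishedPulling - firstStartedPulling
	fmt.Printf("image pull time: %.9fs\n", pull)   // 2.673830723s
	fmt.Printf("e2e minus pull:  %.9fs\n", e2e-pull) // 2.642645311s, matching podStartSLOduration
}

This is a sanity check on the reported numbers, not the tracker's actual implementation.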
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.519824 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.523406 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.563735 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.684268 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmhm2\" (UniqueName: \"kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.684365 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.684536 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.787183 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmhm2\" (UniqueName: \"kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.787273 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.787763 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.787944 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.788299 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.819193 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmhm2\" (UniqueName: \"kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2\") pod \"redhat-marketplace-t47k6\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") " pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:06 crc kubenswrapper[4689]: I0123 11:32:06.859267 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:07 crc kubenswrapper[4689]: I0123 11:32:07.438442 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:07 crc kubenswrapper[4689]: I0123 11:32:07.626429 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerStarted","Data":"d1dd2f429b5523e3b2e1edd3c2a74837cd84efa417b717522b38a3efc15a9afa"}
Jan 23 11:32:08 crc kubenswrapper[4689]: I0123 11:32:08.638662 4689 generic.go:334] "Generic (PLEG): container finished" podID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerID="f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c" exitCode=0
Jan 23 11:32:08 crc kubenswrapper[4689]: I0123 11:32:08.638706 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerDied","Data":"f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c"}
Jan 23 11:32:08 crc kubenswrapper[4689]: I0123 11:32:08.643676 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 11:32:10 crc kubenswrapper[4689]: I0123 11:32:10.659697 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerStarted","Data":"b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8"}
Jan 23 11:32:11 crc kubenswrapper[4689]: I0123 11:32:11.672890 4689 generic.go:334] "Generic (PLEG): container finished" podID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerID="b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8" exitCode=0
Jan 23 11:32:11 crc kubenswrapper[4689]: I0123 11:32:11.672999 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerDied","Data":"b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8"}
Jan 23 11:32:15 crc kubenswrapper[4689]: I0123 11:32:15.718336 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerStarted","Data":"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"}
Jan 23 11:32:15 crc kubenswrapper[4689]: I0123 11:32:15.743307 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t47k6" podStartSLOduration=4.584497929 podStartE2EDuration="9.743286642s" podCreationTimestamp="2026-01-23 11:32:06 +0000 UTC" firstStartedPulling="2026-01-23 11:32:08.643417263 +0000 UTC m=+2593.268097112" lastFinishedPulling="2026-01-23 11:32:13.802205956 +0000 UTC m=+2598.426885825" observedRunningTime="2026-01-23 11:32:15.738564489 +0000 UTC m=+2600.363244358" watchObservedRunningTime="2026-01-23 11:32:15.743286642 +0000 UTC m=+2600.367966511"
Jan 23 11:32:16 crc kubenswrapper[4689]: I0123 11:32:16.860236 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:16 crc kubenswrapper[4689]: I0123 11:32:16.860629 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:16 crc kubenswrapper[4689]: I0123 11:32:16.918251 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:26 crc kubenswrapper[4689]: I0123 11:32:26.911913 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:26 crc kubenswrapper[4689]: I0123 11:32:26.960388 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:27 crc kubenswrapper[4689]: I0123 11:32:27.854117 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t47k6" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="registry-server" containerID="cri-o://3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9" gracePeriod=2
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.373724 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.450237 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities\") pod \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") "
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.450524 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmhm2\" (UniqueName: \"kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2\") pod \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") "
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.450757 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content\") pod \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\" (UID: \"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a\") "
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.451075 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities" (OuterVolumeSpecName: "utilities") pod "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" (UID: "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.451561 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.461476 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2" (OuterVolumeSpecName: "kube-api-access-vmhm2") pod "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" (UID: "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a"). InnerVolumeSpecName "kube-api-access-vmhm2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.477991 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" (UID: "7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.554131 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.554180 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmhm2\" (UniqueName: \"kubernetes.io/projected/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a-kube-api-access-vmhm2\") on node \"crc\" DevicePath \"\""
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.866470 4689 generic.go:334] "Generic (PLEG): container finished" podID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerID="3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9" exitCode=0
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.866552 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerDied","Data":"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"}
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.866557 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t47k6"
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.866873 4689 scope.go:117] "RemoveContainer" containerID="3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.866857 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t47k6" event={"ID":"7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a","Type":"ContainerDied","Data":"d1dd2f429b5523e3b2e1edd3c2a74837cd84efa417b717522b38a3efc15a9afa"}
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.893982 4689 scope.go:117] "RemoveContainer" containerID="b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8"
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.914531 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.930875 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-t47k6"]
Jan 23 11:32:28 crc kubenswrapper[4689]: I0123 11:32:28.938205 4689 scope.go:117] "RemoveContainer" containerID="f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c"
Jan 23 11:32:29 crc kubenswrapper[4689]: I0123 11:32:29.007794 4689 scope.go:117] "RemoveContainer" containerID="3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"
Jan 23 11:32:29 crc kubenswrapper[4689]: E0123 11:32:29.008735 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9\": container with ID starting with 3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9 not found: ID does not exist" containerID="3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"
Jan 23 11:32:29 crc kubenswrapper[4689]: I0123 11:32:29.008767 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9"} err="failed to get container status \"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9\": rpc error: code = NotFound desc = could not find container \"3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9\": container with ID starting with 3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9 not found: ID does not exist"
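The "ContainerStatus from runtime service failed ... NotFound" errors here are logged during cleanup of containers that CRI-O has already removed, and the kubelet proceeds as if the deletion succeeded. A minimal Go sketch of that pattern, with getStatus as a hypothetical stand-in for the CRI ContainerStatus call (not the kubelet's actual code):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// deleteContainer asks the runtime for a container's status before removal
// and treats a gRPC NotFound as "already gone" rather than a real failure.
func deleteContainer(id string, getStatus func(string) error) error {
	if err := getStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already removed, nothing to do\n", id[:12])
			return nil
		}
		return fmt.Errorf("failed to get container status %q: %w", id, err)
	}
	// ...only here would the actual RemoveContainer call be issued...
	return nil
}

func main() {
	notFound := status.Error(codes.NotFound, "could not find container")
	_ = deleteContainer("3fac2fd86ed8105f06657a1d40a2ce2115acb38f69550cda036bfbffbbcbb4a9",
		func(string) error { return notFound })
}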
container \"b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8\": container with ID starting with b177277312890dd102417b5cd508297ceeeac6a4371980b59991daceca1132c8 not found: ID does not exist" Jan 23 11:32:29 crc kubenswrapper[4689]: I0123 11:32:29.010534 4689 scope.go:117] "RemoveContainer" containerID="f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c" Jan 23 11:32:29 crc kubenswrapper[4689]: E0123 11:32:29.010852 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c\": container with ID starting with f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c not found: ID does not exist" containerID="f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c" Jan 23 11:32:29 crc kubenswrapper[4689]: I0123 11:32:29.010880 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c"} err="failed to get container status \"f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c\": rpc error: code = NotFound desc = could not find container \"f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c\": container with ID starting with f6121de113a83cca3684a676c116abbb42c2ca8a66b00f414d2647fdf82a870c not found: ID does not exist" Jan 23 11:32:29 crc kubenswrapper[4689]: I0123 11:32:29.653038 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" path="/var/lib/kubelet/pods/7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a/volumes" Jan 23 11:33:03 crc kubenswrapper[4689]: I0123 11:33:03.311283 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:33:03 crc kubenswrapper[4689]: I0123 11:33:03.311971 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:33:33 crc kubenswrapper[4689]: I0123 11:33:33.310565 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:33:33 crc kubenswrapper[4689]: I0123 11:33:33.311174 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.310482 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.310960 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.311013 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf"
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.311868 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.311923 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90" gracePeriod=600
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.783037 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90" exitCode=0
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.783403 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90"}
Jan 23 11:34:03 crc kubenswrapper[4689]: I0123 11:34:03.783650 4689 scope.go:117] "RemoveContainer" containerID="d09e0a5eb97fac9fab2903791a6e7cc571fd701f5ecf0eaf37e1ab60ae3ae11f"
Jan 23 11:34:04 crc kubenswrapper[4689]: I0123 11:34:04.801846 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"}
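The sequence above is the kubelet's liveness loop in action: an HTTP GET against 127.0.0.1:8798/health fails with connection refused three times, after which the container is killed (gracePeriod=600) and restarted. A minimal Go sketch of the same kind of HTTP check, assuming only the port, path, and failure output taken from the log; the timeout is an arbitrary choice, and the real prober's success criterion is any 2xx/3xx response:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe issues the same request the prober logs above and treats transport
// errors (e.g. "connect: connection refused") or 4xx/5xx responses as failure.
func probe() error {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: %s", resp.Status)
	}
	return nil
}

func main() {
	if err := probe(); err != nil {
		fmt.Println("Probe failed:", err)
	} else {
		fmt.Println("ok")
	}
}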
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.254936 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9wwxh"]
Jan 23 11:35:28 crc kubenswrapper[4689]: E0123 11:35:28.256046 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="extract-content"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.256059 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="extract-content"
Jan 23 11:35:28 crc kubenswrapper[4689]: E0123 11:35:28.256080 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="registry-server"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.256086 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="registry-server"
Jan 23 11:35:28 crc kubenswrapper[4689]: E0123 11:35:28.256106 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="extract-utilities"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.256114 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="extract-utilities"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.256341 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7123b0ae-3d3c-4d0c-a7c7-84f8ae0d586a" containerName="registry-server"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.258071 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.265818 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9wwxh"]
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.348079 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.450567 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.450698 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.450755 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwhrs\" (UniqueName: \"kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.451262 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.554689 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.554794 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwhrs\" (UniqueName: \"kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.555754 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.592791 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwhrs\" (UniqueName: \"kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs\") pod \"certified-operators-9wwxh\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:28 crc kubenswrapper[4689]: I0123 11:35:28.601730 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9wwxh"
Jan 23 11:35:29 crc kubenswrapper[4689]: I0123 11:35:29.144437 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9wwxh"]
Jan 23 11:35:29 crc kubenswrapper[4689]: W0123 11:35:29.147617 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff4891bd_e8b6_400e_9197_e76a7e91914a.slice/crio-0ba68b84432b0940a5ddc6906039eb0bef852d80eba045e94a0b32520c77e3d6 WatchSource:0}: Error finding container 0ba68b84432b0940a5ddc6906039eb0bef852d80eba045e94a0b32520c77e3d6: Status 404 returned error can't find the container with id 0ba68b84432b0940a5ddc6906039eb0bef852d80eba045e94a0b32520c77e3d6
Jan 23 11:35:29 crc kubenswrapper[4689]: I0123 11:35:29.801098 4689 generic.go:334] "Generic (PLEG): container finished" podID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerID="74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612" exitCode=0
Jan 23 11:35:29 crc kubenswrapper[4689]: I0123 11:35:29.801168 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerDied","Data":"74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612"}
Jan 23 11:35:29 crc kubenswrapper[4689]: I0123 11:35:29.801201 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerStarted","Data":"0ba68b84432b0940a5ddc6906039eb0bef852d80eba045e94a0b32520c77e3d6"}
Jan 23 11:35:30 crc kubenswrapper[4689]: I0123 11:35:30.814437 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerStarted","Data":"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae"}
Jan 23 11:35:31 crc kubenswrapper[4689]: I0123 11:35:31.827795 4689 generic.go:334] "Generic (PLEG): container finished" podID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerID="adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae" exitCode=0
Jan 23 11:35:31 crc kubenswrapper[4689]: I0123 11:35:31.827901 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerDied","Data":"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae"}
Jan 23 11:35:32 crc kubenswrapper[4689]: I0123 11:35:32.846473 4689 generic.go:334] "Generic (PLEG): container finished" podID="e5b31e85-298a-4959-b5d0-87adb59850b6" containerID="8ff941691c4f01a5f2503a0c2821c630ce2de5b40681a6167b5aad752dc1749e" exitCode=0
Jan 23 11:35:32 crc kubenswrapper[4689]: I0123 11:35:32.846502 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" event={"ID":"e5b31e85-298a-4959-b5d0-87adb59850b6","Type":"ContainerDied","Data":"8ff941691c4f01a5f2503a0c2821c630ce2de5b40681a6167b5aad752dc1749e"}
Jan 23 11:35:32 crc kubenswrapper[4689]: I0123 11:35:32.851041 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerStarted","Data":"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c"}
Jan 23 11:35:32 crc kubenswrapper[4689]: I0123 11:35:32.903877 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9wwxh" podStartSLOduration=2.340192326 podStartE2EDuration="4.903856313s" podCreationTimestamp="2026-01-23 11:35:28 +0000 UTC" firstStartedPulling="2026-01-23 11:35:29.804055542 +0000 UTC m=+2794.428735401" lastFinishedPulling="2026-01-23 11:35:32.367719489 +0000 UTC m=+2796.992399388" observedRunningTime="2026-01-23 11:35:32.895185568 +0000 UTC m=+2797.519865507" watchObservedRunningTime="2026-01-23 11:35:32.903856313 +0000 UTC m=+2797.528536182"
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.384519 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz"
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.418780 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfcwl\" (UniqueName: \"kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl\") pod \"e5b31e85-298a-4959-b5d0-87adb59850b6\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") "
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.418887 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0\") pod \"e5b31e85-298a-4959-b5d0-87adb59850b6\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") "
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.418935 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory\") pod \"e5b31e85-298a-4959-b5d0-87adb59850b6\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") "
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.418960 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam\") pod \"e5b31e85-298a-4959-b5d0-87adb59850b6\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") "
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.419049 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle\") pod \"e5b31e85-298a-4959-b5d0-87adb59850b6\" (UID: \"e5b31e85-298a-4959-b5d0-87adb59850b6\") "
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.428173 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl" (OuterVolumeSpecName: "kube-api-access-vfcwl") pod "e5b31e85-298a-4959-b5d0-87adb59850b6" (UID: "e5b31e85-298a-4959-b5d0-87adb59850b6"). InnerVolumeSpecName "kube-api-access-vfcwl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.455620 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "e5b31e85-298a-4959-b5d0-87adb59850b6" (UID: "e5b31e85-298a-4959-b5d0-87adb59850b6"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.463914 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "e5b31e85-298a-4959-b5d0-87adb59850b6" (UID: "e5b31e85-298a-4959-b5d0-87adb59850b6"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.482889 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory" (OuterVolumeSpecName: "inventory") pod "e5b31e85-298a-4959-b5d0-87adb59850b6" (UID: "e5b31e85-298a-4959-b5d0-87adb59850b6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.488716 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e5b31e85-298a-4959-b5d0-87adb59850b6" (UID: "e5b31e85-298a-4959-b5d0-87adb59850b6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.523107 4689 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.523157 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfcwl\" (UniqueName: \"kubernetes.io/projected/e5b31e85-298a-4959-b5d0-87adb59850b6-kube-api-access-vfcwl\") on node \"crc\" DevicePath \"\""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.523168 4689 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.523178 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-inventory\") on node \"crc\" DevicePath \"\""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.523191 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e5b31e85-298a-4959-b5d0-87adb59850b6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.918526 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz" event={"ID":"e5b31e85-298a-4959-b5d0-87adb59850b6","Type":"ContainerDied","Data":"12fd67747c91199f1a3f6d6f4cd335ca2d8575f1369013ba53b048ad932abc49"}
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.918574 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12fd67747c91199f1a3f6d6f4cd335ca2d8575f1369013ba53b048ad932abc49"
Jan 23 11:35:34 crc kubenswrapper[4689]: I0123 11:35:34.919040 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.043302 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"]
Jan 23 11:35:35 crc kubenswrapper[4689]: E0123 11:35:35.043868 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5b31e85-298a-4959-b5d0-87adb59850b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.043883 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5b31e85-298a-4959-b5d0-87adb59850b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.044111 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5b31e85-298a-4959-b5d0-87adb59850b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.045008 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.047317 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.047371 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.047605 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.050867 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.051227 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.051513 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.057514 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.066423 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"]
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.141429 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwlwz\" (UniqueName: \"kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.142770 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.142905 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.142982 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.143374 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.143528 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.143603 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.143703 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.143804 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.245718 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.245787 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.245812 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.245876 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.246532 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.246582 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwlwz\" (UniqueName: \"kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.246642 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.246732 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.246826 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.247704 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.253066 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.253441 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.253870 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.254541 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.254685 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.255768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.255962 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"
I0123 11:35:35.283664 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwlwz\" (UniqueName: \"kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fxrwh\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" Jan 23 11:35:35 crc kubenswrapper[4689]: I0123 11:35:35.366968 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" Jan 23 11:35:36 crc kubenswrapper[4689]: W0123 11:35:36.234448 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31a5bdbc_3b38_46f3_8e74_bdc66342ec5e.slice/crio-6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7 WatchSource:0}: Error finding container 6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7: Status 404 returned error can't find the container with id 6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7 Jan 23 11:35:36 crc kubenswrapper[4689]: I0123 11:35:36.234866 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh"] Jan 23 11:35:36 crc kubenswrapper[4689]: I0123 11:35:36.962761 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" event={"ID":"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e","Type":"ContainerStarted","Data":"6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7"} Jan 23 11:35:37 crc kubenswrapper[4689]: I0123 11:35:37.980806 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" event={"ID":"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e","Type":"ContainerStarted","Data":"b3887ecc9e69ec0f2f535eae98df39e39799451e1bd34f32924a3b81040f86c1"} Jan 23 11:35:38 crc kubenswrapper[4689]: I0123 11:35:38.010696 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" podStartSLOduration=2.02548496 podStartE2EDuration="3.010677559s" podCreationTimestamp="2026-01-23 11:35:35 +0000 UTC" firstStartedPulling="2026-01-23 11:35:36.23822668 +0000 UTC m=+2800.862906549" lastFinishedPulling="2026-01-23 11:35:37.223419259 +0000 UTC m=+2801.848099148" observedRunningTime="2026-01-23 11:35:38.006522296 +0000 UTC m=+2802.631202175" watchObservedRunningTime="2026-01-23 11:35:38.010677559 +0000 UTC m=+2802.635357418" Jan 23 11:35:38 crc kubenswrapper[4689]: I0123 11:35:38.602934 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:38 crc kubenswrapper[4689]: I0123 11:35:38.603006 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:38 crc kubenswrapper[4689]: I0123 11:35:38.661210 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:39 crc kubenswrapper[4689]: I0123 11:35:39.074761 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:39 crc kubenswrapper[4689]: I0123 11:35:39.163474 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-9wwxh"] Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.016105 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9wwxh" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="registry-server" containerID="cri-o://730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c" gracePeriod=2 Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.556910 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.708682 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwhrs\" (UniqueName: \"kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs\") pod \"ff4891bd-e8b6-400e-9197-e76a7e91914a\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.709010 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities\") pod \"ff4891bd-e8b6-400e-9197-e76a7e91914a\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.709155 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content\") pod \"ff4891bd-e8b6-400e-9197-e76a7e91914a\" (UID: \"ff4891bd-e8b6-400e-9197-e76a7e91914a\") " Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.712692 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities" (OuterVolumeSpecName: "utilities") pod "ff4891bd-e8b6-400e-9197-e76a7e91914a" (UID: "ff4891bd-e8b6-400e-9197-e76a7e91914a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.715784 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs" (OuterVolumeSpecName: "kube-api-access-nwhrs") pod "ff4891bd-e8b6-400e-9197-e76a7e91914a" (UID: "ff4891bd-e8b6-400e-9197-e76a7e91914a"). InnerVolumeSpecName "kube-api-access-nwhrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.751929 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff4891bd-e8b6-400e-9197-e76a7e91914a" (UID: "ff4891bd-e8b6-400e-9197-e76a7e91914a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.814979 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.815007 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwhrs\" (UniqueName: \"kubernetes.io/projected/ff4891bd-e8b6-400e-9197-e76a7e91914a-kube-api-access-nwhrs\") on node \"crc\" DevicePath \"\"" Jan 23 11:35:41 crc kubenswrapper[4689]: I0123 11:35:41.815019 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4891bd-e8b6-400e-9197-e76a7e91914a-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.033015 4689 generic.go:334] "Generic (PLEG): container finished" podID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerID="730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c" exitCode=0 Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.033097 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerDied","Data":"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c"} Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.033186 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9wwxh" event={"ID":"ff4891bd-e8b6-400e-9197-e76a7e91914a","Type":"ContainerDied","Data":"0ba68b84432b0940a5ddc6906039eb0bef852d80eba045e94a0b32520c77e3d6"} Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.033234 4689 scope.go:117] "RemoveContainer" containerID="730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.034363 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9wwxh" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.057623 4689 scope.go:117] "RemoveContainer" containerID="adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.131998 4689 scope.go:117] "RemoveContainer" containerID="74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.139284 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9wwxh"] Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.152599 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9wwxh"] Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.155978 4689 scope.go:117] "RemoveContainer" containerID="730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c" Jan 23 11:35:42 crc kubenswrapper[4689]: E0123 11:35:42.156508 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c\": container with ID starting with 730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c not found: ID does not exist" containerID="730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.156554 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c"} err="failed to get container status \"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c\": rpc error: code = NotFound desc = could not find container \"730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c\": container with ID starting with 730a9f95cc0e992b8dca49615457262babf0911ccd18fa5d51c825d95f70518c not found: ID does not exist" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.156575 4689 scope.go:117] "RemoveContainer" containerID="adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae" Jan 23 11:35:42 crc kubenswrapper[4689]: E0123 11:35:42.156879 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae\": container with ID starting with adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae not found: ID does not exist" containerID="adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.156939 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae"} err="failed to get container status \"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae\": rpc error: code = NotFound desc = could not find container \"adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae\": container with ID starting with adecf5c11365dc90782f4b8835c7e40ef8c1ff213369bb05f58c7b8746c33dae not found: ID does not exist" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.156975 4689 scope.go:117] "RemoveContainer" containerID="74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612" Jan 23 11:35:42 crc kubenswrapper[4689]: E0123 11:35:42.157285 4689 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612\": container with ID starting with 74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612 not found: ID does not exist" containerID="74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612" Jan 23 11:35:42 crc kubenswrapper[4689]: I0123 11:35:42.157310 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612"} err="failed to get container status \"74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612\": rpc error: code = NotFound desc = could not find container \"74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612\": container with ID starting with 74e6fa9db0ac8e5eab8ee4ade86b6ef04ee257b8c6e50e36947da49f54d8d612 not found: ID does not exist" Jan 23 11:35:43 crc kubenswrapper[4689]: I0123 11:35:43.666784 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" path="/var/lib/kubelet/pods/ff4891bd-e8b6-400e-9197-e76a7e91914a/volumes" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.769086 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:35:46 crc kubenswrapper[4689]: E0123 11:35:46.770284 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="extract-utilities" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.770301 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="extract-utilities" Jan 23 11:35:46 crc kubenswrapper[4689]: E0123 11:35:46.770325 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="registry-server" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.770331 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="registry-server" Jan 23 11:35:46 crc kubenswrapper[4689]: E0123 11:35:46.770345 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="extract-content" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.770353 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="extract-content" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.770559 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff4891bd-e8b6-400e-9197-e76a7e91914a" containerName="registry-server" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.772240 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.789804 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.960403 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.960473 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:46 crc kubenswrapper[4689]: I0123 11:35:46.961033 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g96t8\" (UniqueName: \"kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.062947 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.063029 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.063229 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g96t8\" (UniqueName: \"kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.063449 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.064421 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.092103 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-g96t8\" (UniqueName: \"kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8\") pod \"redhat-operators-mnwth\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.107848 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:47 crc kubenswrapper[4689]: I0123 11:35:47.595982 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:35:48 crc kubenswrapper[4689]: I0123 11:35:48.116524 4689 generic.go:334] "Generic (PLEG): container finished" podID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerID="798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2" exitCode=0 Jan 23 11:35:48 crc kubenswrapper[4689]: I0123 11:35:48.116861 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerDied","Data":"798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2"} Jan 23 11:35:48 crc kubenswrapper[4689]: I0123 11:35:48.116895 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerStarted","Data":"7272c78445510ec8c531e11134a6731722a6201040292a9d7b920eb649af2bb5"} Jan 23 11:35:49 crc kubenswrapper[4689]: I0123 11:35:49.131875 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerStarted","Data":"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f"} Jan 23 11:35:51 crc kubenswrapper[4689]: I0123 11:35:51.979533 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:35:51 crc kubenswrapper[4689]: I0123 11:35:51.984666 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.004432 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.098363 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9cj6\" (UniqueName: \"kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.098791 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.099092 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.201660 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.201817 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.202098 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9cj6\" (UniqueName: \"kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.202658 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.202854 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.231244 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s9cj6\" (UniqueName: \"kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6\") pod \"community-operators-x82w8\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:52 crc kubenswrapper[4689]: I0123 11:35:52.319523 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:35:54 crc kubenswrapper[4689]: I0123 11:35:54.324077 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:35:55 crc kubenswrapper[4689]: I0123 11:35:55.198532 4689 generic.go:334] "Generic (PLEG): container finished" podID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerID="e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f" exitCode=0 Jan 23 11:35:55 crc kubenswrapper[4689]: I0123 11:35:55.198614 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerDied","Data":"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f"} Jan 23 11:35:55 crc kubenswrapper[4689]: I0123 11:35:55.201531 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerID="7df92ad992e3c601cf5787e19bc30707c45d2bc072ef2374dfe736fa92a0f2d9" exitCode=0 Jan 23 11:35:55 crc kubenswrapper[4689]: I0123 11:35:55.201576 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerDied","Data":"7df92ad992e3c601cf5787e19bc30707c45d2bc072ef2374dfe736fa92a0f2d9"} Jan 23 11:35:55 crc kubenswrapper[4689]: I0123 11:35:55.201597 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerStarted","Data":"0d303c5285f4132b2bc938a9331d79224d77c96bc48af81a75fae8089df20325"} Jan 23 11:35:56 crc kubenswrapper[4689]: I0123 11:35:56.213919 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerStarted","Data":"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c"} Jan 23 11:35:56 crc kubenswrapper[4689]: I0123 11:35:56.216261 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerStarted","Data":"d50f3a6f8d4502f6191f9860812f422f1ccbb52a59f86137860d7cee789aeb15"} Jan 23 11:35:56 crc kubenswrapper[4689]: I0123 11:35:56.239677 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mnwth" podStartSLOduration=2.7151297530000003 podStartE2EDuration="10.239658568s" podCreationTimestamp="2026-01-23 11:35:46 +0000 UTC" firstStartedPulling="2026-01-23 11:35:48.121821691 +0000 UTC m=+2812.746501540" lastFinishedPulling="2026-01-23 11:35:55.646350496 +0000 UTC m=+2820.271030355" observedRunningTime="2026-01-23 11:35:56.229865175 +0000 UTC m=+2820.854545034" watchObservedRunningTime="2026-01-23 11:35:56.239658568 +0000 UTC m=+2820.864338427" Jan 23 11:35:57 crc kubenswrapper[4689]: I0123 11:35:57.109586 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:57 crc kubenswrapper[4689]: I0123 11:35:57.110054 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:35:57 crc kubenswrapper[4689]: I0123 11:35:57.227830 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerID="d50f3a6f8d4502f6191f9860812f422f1ccbb52a59f86137860d7cee789aeb15" exitCode=0 Jan 23 11:35:57 crc kubenswrapper[4689]: I0123 11:35:57.227927 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerDied","Data":"d50f3a6f8d4502f6191f9860812f422f1ccbb52a59f86137860d7cee789aeb15"} Jan 23 11:35:58 crc kubenswrapper[4689]: I0123 11:35:58.175861 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mnwth" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" probeResult="failure" output=< Jan 23 11:35:58 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:35:58 crc kubenswrapper[4689]: > Jan 23 11:35:58 crc kubenswrapper[4689]: I0123 11:35:58.240812 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerStarted","Data":"543315af247aba4ebffc82531a8b091bec2bd87e3460681c037274b942cd9f3e"} Jan 23 11:35:58 crc kubenswrapper[4689]: I0123 11:35:58.266936 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x82w8" podStartSLOduration=4.589511036 podStartE2EDuration="7.266914424s" podCreationTimestamp="2026-01-23 11:35:51 +0000 UTC" firstStartedPulling="2026-01-23 11:35:55.202725906 +0000 UTC m=+2819.827405765" lastFinishedPulling="2026-01-23 11:35:57.880129294 +0000 UTC m=+2822.504809153" observedRunningTime="2026-01-23 11:35:58.258178317 +0000 UTC m=+2822.882858176" watchObservedRunningTime="2026-01-23 11:35:58.266914424 +0000 UTC m=+2822.891594283" Jan 23 11:36:02 crc kubenswrapper[4689]: I0123 11:36:02.319877 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:02 crc kubenswrapper[4689]: I0123 11:36:02.320352 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:02 crc kubenswrapper[4689]: I0123 11:36:02.390853 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:03 crc kubenswrapper[4689]: I0123 11:36:03.310813 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:36:03 crc kubenswrapper[4689]: I0123 11:36:03.311290 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:36:03 crc 
kubenswrapper[4689]: I0123 11:36:03.362676 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:06 crc kubenswrapper[4689]: I0123 11:36:06.954453 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:36:06 crc kubenswrapper[4689]: I0123 11:36:06.955299 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x82w8" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="registry-server" containerID="cri-o://543315af247aba4ebffc82531a8b091bec2bd87e3460681c037274b942cd9f3e" gracePeriod=2 Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.347241 4689 generic.go:334] "Generic (PLEG): container finished" podID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerID="543315af247aba4ebffc82531a8b091bec2bd87e3460681c037274b942cd9f3e" exitCode=0 Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.347371 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerDied","Data":"543315af247aba4ebffc82531a8b091bec2bd87e3460681c037274b942cd9f3e"} Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.347645 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x82w8" event={"ID":"6fcbe80f-fb55-46e5-9f4f-603f0736afbf","Type":"ContainerDied","Data":"0d303c5285f4132b2bc938a9331d79224d77c96bc48af81a75fae8089df20325"} Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.347677 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d303c5285f4132b2bc938a9331d79224d77c96bc48af81a75fae8089df20325" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.459606 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.529978 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content\") pod \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.530223 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities\") pod \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.530272 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9cj6\" (UniqueName: \"kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6\") pod \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\" (UID: \"6fcbe80f-fb55-46e5-9f4f-603f0736afbf\") " Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.530922 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities" (OuterVolumeSpecName: "utilities") pod "6fcbe80f-fb55-46e5-9f4f-603f0736afbf" (UID: "6fcbe80f-fb55-46e5-9f4f-603f0736afbf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.531044 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.536813 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6" (OuterVolumeSpecName: "kube-api-access-s9cj6") pod "6fcbe80f-fb55-46e5-9f4f-603f0736afbf" (UID: "6fcbe80f-fb55-46e5-9f4f-603f0736afbf"). InnerVolumeSpecName "kube-api-access-s9cj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.584445 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fcbe80f-fb55-46e5-9f4f-603f0736afbf" (UID: "6fcbe80f-fb55-46e5-9f4f-603f0736afbf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.633128 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9cj6\" (UniqueName: \"kubernetes.io/projected/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-kube-api-access-s9cj6\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:07 crc kubenswrapper[4689]: I0123 11:36:07.633343 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fcbe80f-fb55-46e5-9f4f-603f0736afbf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:08 crc kubenswrapper[4689]: I0123 11:36:08.172825 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mnwth" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" probeResult="failure" output=< Jan 23 11:36:08 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:36:08 crc kubenswrapper[4689]: > Jan 23 11:36:08 crc kubenswrapper[4689]: I0123 11:36:08.358633 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x82w8" Jan 23 11:36:08 crc kubenswrapper[4689]: I0123 11:36:08.394638 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:36:08 crc kubenswrapper[4689]: I0123 11:36:08.415908 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x82w8"] Jan 23 11:36:09 crc kubenswrapper[4689]: I0123 11:36:09.653663 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" path="/var/lib/kubelet/pods/6fcbe80f-fb55-46e5-9f4f-603f0736afbf/volumes" Jan 23 11:36:17 crc kubenswrapper[4689]: I0123 11:36:17.211370 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:36:17 crc kubenswrapper[4689]: I0123 11:36:17.285964 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:36:17 crc kubenswrapper[4689]: I0123 11:36:17.468318 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:36:18 crc kubenswrapper[4689]: I0123 11:36:18.492375 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mnwth" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" containerID="cri-o://a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c" gracePeriod=2 Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.501320 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.504329 4689 generic.go:334] "Generic (PLEG): container finished" podID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerID="a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c" exitCode=0 Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.504368 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerDied","Data":"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c"} Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.504392 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnwth" event={"ID":"e3c0bd0d-53d0-4c52-b850-9b60b6292486","Type":"ContainerDied","Data":"7272c78445510ec8c531e11134a6731722a6201040292a9d7b920eb649af2bb5"} Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.504407 4689 scope.go:117] "RemoveContainer" containerID="a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.504528 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mnwth" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.552542 4689 scope.go:117] "RemoveContainer" containerID="e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.577345 4689 scope.go:117] "RemoveContainer" containerID="798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.624076 4689 scope.go:117] "RemoveContainer" containerID="a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c" Jan 23 11:36:19 crc kubenswrapper[4689]: E0123 11:36:19.624558 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c\": container with ID starting with a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c not found: ID does not exist" containerID="a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.624614 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c"} err="failed to get container status \"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c\": rpc error: code = NotFound desc = could not find container \"a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c\": container with ID starting with a552b1f47c7276f9fe405e35ac4e918167db2072c2c23956d5fc759496897a1c not found: ID does not exist" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.624640 4689 scope.go:117] "RemoveContainer" containerID="e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f" Jan 23 11:36:19 crc kubenswrapper[4689]: E0123 11:36:19.625172 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f\": container with ID starting with e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f not found: ID does not exist" containerID="e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.625200 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f"} err="failed to get container status \"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f\": rpc error: code = NotFound desc = could not find container \"e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f\": container with ID starting with e835a51aa7797c0c23ee017a7ba8d87a767a2b5984061210a04002a99196bd6f not found: ID does not exist" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.625219 4689 scope.go:117] "RemoveContainer" containerID="798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2" Jan 23 11:36:19 crc kubenswrapper[4689]: E0123 11:36:19.625625 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2\": container with ID starting with 798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2 not found: ID does not exist" containerID="798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2" 
Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.625720 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2"} err="failed to get container status \"798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2\": rpc error: code = NotFound desc = could not find container \"798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2\": container with ID starting with 798243f46aa6e11832fee80f746ab06e7495744661ef7845f9b5c7eb2e2d39b2 not found: ID does not exist" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.655930 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content\") pod \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.656203 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities\") pod \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.656358 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g96t8\" (UniqueName: \"kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8\") pod \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\" (UID: \"e3c0bd0d-53d0-4c52-b850-9b60b6292486\") " Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.659058 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities" (OuterVolumeSpecName: "utilities") pod "e3c0bd0d-53d0-4c52-b850-9b60b6292486" (UID: "e3c0bd0d-53d0-4c52-b850-9b60b6292486"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.663528 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8" (OuterVolumeSpecName: "kube-api-access-g96t8") pod "e3c0bd0d-53d0-4c52-b850-9b60b6292486" (UID: "e3c0bd0d-53d0-4c52-b850-9b60b6292486"). InnerVolumeSpecName "kube-api-access-g96t8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.759845 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.760243 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g96t8\" (UniqueName: \"kubernetes.io/projected/e3c0bd0d-53d0-4c52-b850-9b60b6292486-kube-api-access-g96t8\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.773811 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3c0bd0d-53d0-4c52-b850-9b60b6292486" (UID: "e3c0bd0d-53d0-4c52-b850-9b60b6292486"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.858103 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.867417 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3c0bd0d-53d0-4c52-b850-9b60b6292486-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:36:19 crc kubenswrapper[4689]: I0123 11:36:19.871437 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mnwth"] Jan 23 11:36:21 crc kubenswrapper[4689]: I0123 11:36:21.659311 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" path="/var/lib/kubelet/pods/e3c0bd0d-53d0-4c52-b850-9b60b6292486/volumes" Jan 23 11:36:33 crc kubenswrapper[4689]: I0123 11:36:33.312047 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:36:33 crc kubenswrapper[4689]: I0123 11:36:33.312813 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:37:03 crc kubenswrapper[4689]: I0123 11:37:03.310654 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:37:03 crc kubenswrapper[4689]: I0123 11:37:03.311083 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:37:03 crc kubenswrapper[4689]: I0123 11:37:03.311118 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:37:03 crc kubenswrapper[4689]: I0123 11:37:03.311933 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:37:03 crc kubenswrapper[4689]: I0123 11:37:03.311979 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" gracePeriod=600 Jan 23 11:37:04 crc kubenswrapper[4689]: I0123 11:37:04.085557 4689 generic.go:334] "Generic (PLEG): container 
finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" exitCode=0 Jan 23 11:37:04 crc kubenswrapper[4689]: I0123 11:37:04.085864 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"} Jan 23 11:37:04 crc kubenswrapper[4689]: I0123 11:37:04.085936 4689 scope.go:117] "RemoveContainer" containerID="e2f14954f9a6d7524a9610da965b62870acd17693d30353a3b1277b07188ee90" Jan 23 11:37:04 crc kubenswrapper[4689]: E0123 11:37:04.515348 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:37:05 crc kubenswrapper[4689]: I0123 11:37:05.099130 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:37:05 crc kubenswrapper[4689]: E0123 11:37:05.100275 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:37:19 crc kubenswrapper[4689]: I0123 11:37:19.640237 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:37:19 crc kubenswrapper[4689]: E0123 11:37:19.640919 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:37:31 crc kubenswrapper[4689]: I0123 11:37:31.639961 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:37:31 crc kubenswrapper[4689]: E0123 11:37:31.640641 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:37:44 crc kubenswrapper[4689]: I0123 11:37:44.639817 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:37:44 crc kubenswrapper[4689]: E0123 11:37:44.640555 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:37:56 crc kubenswrapper[4689]: I0123 11:37:56.640676 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:37:56 crc kubenswrapper[4689]: E0123 11:37:56.641467 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:38:01 crc kubenswrapper[4689]: I0123 11:38:01.770601 4689 generic.go:334] "Generic (PLEG): container finished" podID="31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" containerID="b3887ecc9e69ec0f2f535eae98df39e39799451e1bd34f32924a3b81040f86c1" exitCode=0 Jan 23 11:38:01 crc kubenswrapper[4689]: I0123 11:38:01.770742 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" event={"ID":"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e","Type":"ContainerDied","Data":"b3887ecc9e69ec0f2f535eae98df39e39799451e1bd34f32924a3b81040f86c1"} Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.294878 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.409675 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.409752 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.409962 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410056 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwlwz\" (UniqueName: \"kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410084 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0\") pod 
\"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410167 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410197 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410243 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.410305 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory\") pod \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\" (UID: \"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e\") " Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.416780 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.418630 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz" (OuterVolumeSpecName: "kube-api-access-cwlwz") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "kube-api-access-cwlwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.444756 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.448080 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.454562 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.455258 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.455974 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.458094 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory" (OuterVolumeSpecName: "inventory") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.467807 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" (UID: "31a5bdbc-3b38-46f3-8e74-bdc66342ec5e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513386 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwlwz\" (UniqueName: \"kubernetes.io/projected/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-kube-api-access-cwlwz\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513432 4689 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513451 4689 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513468 4689 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513485 4689 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513504 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513521 4689 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513536 4689 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.513550 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/31a5bdbc-3b38-46f3-8e74-bdc66342ec5e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.808967 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" event={"ID":"31a5bdbc-3b38-46f3-8e74-bdc66342ec5e","Type":"ContainerDied","Data":"6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7"} Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.809008 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b75a91ef2cd1f176a60d0d08df63ca871f158a4b25c9706ba40f964dbb08ab7" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.809049 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fxrwh" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.940194 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz"] Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941364 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="extract-utilities" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941392 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="extract-utilities" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941430 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941440 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941461 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="extract-content" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941471 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="extract-content" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941491 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="extract-utilities" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941500 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="extract-utilities" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941521 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941530 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941554 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="registry-server" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941565 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="registry-server" Jan 23 11:38:03 crc kubenswrapper[4689]: E0123 11:38:03.941585 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="extract-content" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941594 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="extract-content" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941908 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fcbe80f-fb55-46e5-9f4f-603f0736afbf" containerName="registry-server" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.941941 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3c0bd0d-53d0-4c52-b850-9b60b6292486" containerName="registry-server" Jan 23 11:38:03 crc 
kubenswrapper[4689]: I0123 11:38:03.941967 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="31a5bdbc-3b38-46f3-8e74-bdc66342ec5e" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.943078 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.947860 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.948105 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.948677 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.948934 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.948951 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:38:03 crc kubenswrapper[4689]: I0123 11:38:03.956959 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz"] Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.026990 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027160 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027327 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pbpm\" (UniqueName: \"kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027412 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027462 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027535 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.027583 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129476 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129658 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pbpm\" (UniqueName: \"kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129725 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129753 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129814 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 
crc kubenswrapper[4689]: I0123 11:38:04.129859 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.129946 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.136000 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.137344 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.137768 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.138295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.146828 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.149710 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.152774 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pbpm\" (UniqueName: \"kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.270854 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.853927 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz"] Jan 23 11:38:04 crc kubenswrapper[4689]: I0123 11:38:04.858562 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:38:05 crc kubenswrapper[4689]: I0123 11:38:05.850432 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" event={"ID":"725556d9-a125-4022-b69d-2611524af283","Type":"ContainerStarted","Data":"ca6ee2ed0912c63055db91adfb4c09b8fec2651c4434da5481965fae2e8aa36f"} Jan 23 11:38:05 crc kubenswrapper[4689]: I0123 11:38:05.851056 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" event={"ID":"725556d9-a125-4022-b69d-2611524af283","Type":"ContainerStarted","Data":"f753d2ee6e6c6c84b0dc4c8a45395536714f09427e2e6ee61235d22459d50c57"} Jan 23 11:38:05 crc kubenswrapper[4689]: I0123 11:38:05.879299 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" podStartSLOduration=2.42845306 podStartE2EDuration="2.879277287s" podCreationTimestamp="2026-01-23 11:38:03 +0000 UTC" firstStartedPulling="2026-01-23 11:38:04.858309528 +0000 UTC m=+2949.482989387" lastFinishedPulling="2026-01-23 11:38:05.309133745 +0000 UTC m=+2949.933813614" observedRunningTime="2026-01-23 11:38:05.874435352 +0000 UTC m=+2950.499115251" watchObservedRunningTime="2026-01-23 11:38:05.879277287 +0000 UTC m=+2950.503957156" Jan 23 11:38:08 crc kubenswrapper[4689]: I0123 11:38:08.640425 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:38:08 crc kubenswrapper[4689]: E0123 11:38:08.641089 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:38:19 crc kubenswrapper[4689]: I0123 11:38:19.641020 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:38:19 crc kubenswrapper[4689]: E0123 11:38:19.642008 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:38:34 crc kubenswrapper[4689]: I0123 11:38:34.640189 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:38:34 crc kubenswrapper[4689]: E0123 11:38:34.641013 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:38:46 crc kubenswrapper[4689]: I0123 11:38:46.640285 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:38:46 crc kubenswrapper[4689]: E0123 11:38:46.641240 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:38:59 crc kubenswrapper[4689]: I0123 11:38:59.640741 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:38:59 crc kubenswrapper[4689]: E0123 11:38:59.641864 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:39:14 crc kubenswrapper[4689]: I0123 11:39:14.640668 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:39:14 crc kubenswrapper[4689]: E0123 11:39:14.641506 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:39:28 crc kubenswrapper[4689]: I0123 11:39:28.640491 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:39:28 crc kubenswrapper[4689]: E0123 11:39:28.641394 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" 
podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:39:42 crc kubenswrapper[4689]: I0123 11:39:42.641775 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:39:42 crc kubenswrapper[4689]: E0123 11:39:42.643180 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:39:53 crc kubenswrapper[4689]: I0123 11:39:53.640626 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:39:53 crc kubenswrapper[4689]: E0123 11:39:53.641725 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:40:04 crc kubenswrapper[4689]: I0123 11:40:04.640772 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:40:04 crc kubenswrapper[4689]: E0123 11:40:04.641930 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:40:15 crc kubenswrapper[4689]: I0123 11:40:15.648733 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:40:15 crc kubenswrapper[4689]: E0123 11:40:15.649473 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:40:26 crc kubenswrapper[4689]: I0123 11:40:26.640598 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:40:26 crc kubenswrapper[4689]: E0123 11:40:26.641704 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:40:36 crc kubenswrapper[4689]: I0123 11:40:36.787691 4689 generic.go:334] "Generic (PLEG): container finished" 
podID="725556d9-a125-4022-b69d-2611524af283" containerID="ca6ee2ed0912c63055db91adfb4c09b8fec2651c4434da5481965fae2e8aa36f" exitCode=0 Jan 23 11:40:36 crc kubenswrapper[4689]: I0123 11:40:36.787801 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" event={"ID":"725556d9-a125-4022-b69d-2611524af283","Type":"ContainerDied","Data":"ca6ee2ed0912c63055db91adfb4c09b8fec2651c4434da5481965fae2e8aa36f"} Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.348993 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.451573 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.451830 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.451912 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pbpm\" (UniqueName: \"kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.452089 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.452213 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.452318 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.452473 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam\") pod \"725556d9-a125-4022-b69d-2611524af283\" (UID: \"725556d9-a125-4022-b69d-2611524af283\") " Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.460316 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm" 
(OuterVolumeSpecName: "kube-api-access-4pbpm") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "kube-api-access-4pbpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.461422 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.507889 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.508870 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory" (OuterVolumeSpecName: "inventory") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.513573 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.516320 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.525123 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "725556d9-a125-4022-b69d-2611524af283" (UID: "725556d9-a125-4022-b69d-2611524af283"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557541 4689 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557597 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557617 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557636 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557655 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557679 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/725556d9-a125-4022-b69d-2611524af283-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.557699 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pbpm\" (UniqueName: \"kubernetes.io/projected/725556d9-a125-4022-b69d-2611524af283-kube-api-access-4pbpm\") on node \"crc\" DevicePath \"\"" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.824216 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" event={"ID":"725556d9-a125-4022-b69d-2611524af283","Type":"ContainerDied","Data":"f753d2ee6e6c6c84b0dc4c8a45395536714f09427e2e6ee61235d22459d50c57"} Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.824286 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f753d2ee6e6c6c84b0dc4c8a45395536714f09427e2e6ee61235d22459d50c57" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.824282 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.958478 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b"] Jan 23 11:40:38 crc kubenswrapper[4689]: E0123 11:40:38.959285 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725556d9-a125-4022-b69d-2611524af283" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.959304 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="725556d9-a125-4022-b69d-2611524af283" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.959576 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="725556d9-a125-4022-b69d-2611524af283" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.960532 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.965080 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.965332 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.965416 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.965410 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968403 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968507 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968605 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968624 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") 
" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968783 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.968861 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj94f\" (UniqueName: \"kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.969059 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.969095 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:38 crc kubenswrapper[4689]: I0123 11:40:38.992025 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b"] Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.078588 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.078714 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj94f\" (UniqueName: \"kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.078907 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" 
(UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.078952 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.079235 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.079787 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.084112 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.084201 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.084850 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.084999 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.085407 4689 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.089096 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.103037 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj94f\" (UniqueName: \"kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.105007 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.284297 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" Jan 23 11:40:39 crc kubenswrapper[4689]: W0123 11:40:39.932105 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76d15661_afac_4fd0_8c0c_bb3ab33b2c29.slice/crio-a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215 WatchSource:0}: Error finding container a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215: Status 404 returned error can't find the container with id a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215 Jan 23 11:40:39 crc kubenswrapper[4689]: I0123 11:40:39.943597 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b"] Jan 23 11:40:40 crc kubenswrapper[4689]: I0123 11:40:40.862250 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" event={"ID":"76d15661-afac-4fd0-8c0c-bb3ab33b2c29","Type":"ContainerStarted","Data":"348f33757621f43c122d6dcb2a1436528ccffb3a461cce85151abb1ed1354ebd"} Jan 23 11:40:40 crc kubenswrapper[4689]: I0123 11:40:40.863065 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" event={"ID":"76d15661-afac-4fd0-8c0c-bb3ab33b2c29","Type":"ContainerStarted","Data":"a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215"} Jan 23 11:40:40 crc kubenswrapper[4689]: I0123 11:40:40.887656 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" podStartSLOduration=2.454292163 podStartE2EDuration="2.887639016s" podCreationTimestamp="2026-01-23 11:40:38 +0000 UTC" firstStartedPulling="2026-01-23 11:40:39.937557136 +0000 UTC m=+3104.562237035" lastFinishedPulling="2026-01-23 11:40:40.370904019 +0000 UTC m=+3104.995583888" observedRunningTime="2026-01-23 11:40:40.88567925 +0000 UTC m=+3105.510359129" watchObservedRunningTime="2026-01-23 11:40:40.887639016 +0000 UTC m=+3105.512318865" Jan 23 11:40:41 crc kubenswrapper[4689]: I0123 11:40:41.640066 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:40:41 crc kubenswrapper[4689]: E0123 11:40:41.640622 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 11:40:53 crc kubenswrapper[4689]: I0123 11:40:53.641083 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91" Jan 23 11:40:53 crc kubenswrapper[4689]: E0123 11:40:53.642423 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 
Jan 23 11:41:06 crc kubenswrapper[4689]: I0123 11:41:06.640544 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:41:06 crc kubenswrapper[4689]: E0123 11:41:06.641682 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:41:18 crc kubenswrapper[4689]: I0123 11:41:18.640316 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:41:18 crc kubenswrapper[4689]: E0123 11:41:18.642609 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:41:31 crc kubenswrapper[4689]: I0123 11:41:31.640644 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:41:31 crc kubenswrapper[4689]: E0123 11:41:31.641614 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:41:44 crc kubenswrapper[4689]: I0123 11:41:44.640552 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:41:44 crc kubenswrapper[4689]: E0123 11:41:44.642066 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:41:59 crc kubenswrapper[4689]: I0123 11:41:59.641062 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:41:59 crc kubenswrapper[4689]: E0123 11:41:59.642768 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.166610 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"]
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.170197 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.191931 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"]
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.274248 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.274378 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.274442 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b947m\" (UniqueName: \"kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.376097 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b947m\" (UniqueName: \"kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.376298 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.376381 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.376834 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.376902 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.403075 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b947m\" (UniqueName: \"kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m\") pod \"redhat-marketplace-6dzs7\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") " pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.495254 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:10 crc kubenswrapper[4689]: I0123 11:42:10.640433 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:42:10 crc kubenswrapper[4689]: W0123 11:42:10.992327 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c9df894_69b8_4ec0_991f_d95f0527c755.slice/crio-8020f9d0ed1d3549f51d2591ecbf9153323272e0aa33d024bc01f59a24a178de WatchSource:0}: Error finding container 8020f9d0ed1d3549f51d2591ecbf9153323272e0aa33d024bc01f59a24a178de: Status 404 returned error can't find the container with id 8020f9d0ed1d3549f51d2591ecbf9153323272e0aa33d024bc01f59a24a178de
Jan 23 11:42:11 crc kubenswrapper[4689]: I0123 11:42:11.007686 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"]
Jan 23 11:42:11 crc kubenswrapper[4689]: I0123 11:42:11.120625 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerStarted","Data":"8020f9d0ed1d3549f51d2591ecbf9153323272e0aa33d024bc01f59a24a178de"}
Jan 23 11:42:11 crc kubenswrapper[4689]: I0123 11:42:11.123973 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae"}
Jan 23 11:42:12 crc kubenswrapper[4689]: I0123 11:42:12.134368 4689 generic.go:334] "Generic (PLEG): container finished" podID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerID="6faaec8906a811ee7dd42eaac9b3be16bb40363d91cb511949425e8b5b40ad04" exitCode=0
Jan 23 11:42:12 crc kubenswrapper[4689]: I0123 11:42:12.134453 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerDied","Data":"6faaec8906a811ee7dd42eaac9b3be16bb40363d91cb511949425e8b5b40ad04"}
Jan 23 11:42:14 crc kubenswrapper[4689]: I0123 11:42:14.162272 4689 generic.go:334] "Generic (PLEG): container finished" podID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerID="8982e3f89443f01d3f2cbfa07f14aa2072f2c9961c324f6d2abea85b37253233" exitCode=0
Jan 23 11:42:14 crc kubenswrapper[4689]: I0123 11:42:14.162355 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerDied","Data":"8982e3f89443f01d3f2cbfa07f14aa2072f2c9961c324f6d2abea85b37253233"}
Jan 23 11:42:15 crc kubenswrapper[4689]: I0123 11:42:15.194001 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerStarted","Data":"a6abe8d720c45774745c6831f4429308160abf78028bee703c9e341d7b639813"}
Jan 23 11:42:15 crc kubenswrapper[4689]: I0123 11:42:15.231991 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6dzs7" podStartSLOduration=2.761877001 podStartE2EDuration="5.23196793s" podCreationTimestamp="2026-01-23 11:42:10 +0000 UTC" firstStartedPulling="2026-01-23 11:42:12.13645751 +0000 UTC m=+3196.761137369" lastFinishedPulling="2026-01-23 11:42:14.606548449 +0000 UTC m=+3199.231228298" observedRunningTime="2026-01-23 11:42:15.216178166 +0000 UTC m=+3199.840858025" watchObservedRunningTime="2026-01-23 11:42:15.23196793 +0000 UTC m=+3199.856647789"
Jan 23 11:42:20 crc kubenswrapper[4689]: I0123 11:42:20.496126 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:20 crc kubenswrapper[4689]: I0123 11:42:20.496795 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:20 crc kubenswrapper[4689]: I0123 11:42:20.593635 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:21 crc kubenswrapper[4689]: I0123 11:42:21.333653 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:21 crc kubenswrapper[4689]: I0123 11:42:21.415432 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"]
Jan 23 11:42:23 crc kubenswrapper[4689]: I0123 11:42:23.296575 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6dzs7" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="registry-server" containerID="cri-o://a6abe8d720c45774745c6831f4429308160abf78028bee703c9e341d7b639813" gracePeriod=2
Jan 23 11:42:24 crc kubenswrapper[4689]: I0123 11:42:24.313097 4689 generic.go:334] "Generic (PLEG): container finished" podID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerID="a6abe8d720c45774745c6831f4429308160abf78028bee703c9e341d7b639813" exitCode=0
Jan 23 11:42:24 crc kubenswrapper[4689]: I0123 11:42:24.313205 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerDied","Data":"a6abe8d720c45774745c6831f4429308160abf78028bee703c9e341d7b639813"}
Jan 23 11:42:24 crc kubenswrapper[4689]: I0123 11:42:24.935434 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6dzs7"
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.095860 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b947m\" (UniqueName: \"kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m\") pod \"3c9df894-69b8-4ec0-991f-d95f0527c755\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") "
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.095990 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content\") pod \"3c9df894-69b8-4ec0-991f-d95f0527c755\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") "
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.096093 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities\") pod \"3c9df894-69b8-4ec0-991f-d95f0527c755\" (UID: \"3c9df894-69b8-4ec0-991f-d95f0527c755\") "
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.097845 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities" (OuterVolumeSpecName: "utilities") pod "3c9df894-69b8-4ec0-991f-d95f0527c755" (UID: "3c9df894-69b8-4ec0-991f-d95f0527c755"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.117377 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m" (OuterVolumeSpecName: "kube-api-access-b947m") pod "3c9df894-69b8-4ec0-991f-d95f0527c755" (UID: "3c9df894-69b8-4ec0-991f-d95f0527c755"). InnerVolumeSpecName "kube-api-access-b947m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.147428 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c9df894-69b8-4ec0-991f-d95f0527c755" (UID: "3c9df894-69b8-4ec0-991f-d95f0527c755"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.199870 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.199919 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c9df894-69b8-4ec0-991f-d95f0527c755-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.199933 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b947m\" (UniqueName: \"kubernetes.io/projected/3c9df894-69b8-4ec0-991f-d95f0527c755-kube-api-access-b947m\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.328354 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6dzs7" event={"ID":"3c9df894-69b8-4ec0-991f-d95f0527c755","Type":"ContainerDied","Data":"8020f9d0ed1d3549f51d2591ecbf9153323272e0aa33d024bc01f59a24a178de"} Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.328613 4689 scope.go:117] "RemoveContainer" containerID="a6abe8d720c45774745c6831f4429308160abf78028bee703c9e341d7b639813" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.328405 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6dzs7" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.362918 4689 scope.go:117] "RemoveContainer" containerID="8982e3f89443f01d3f2cbfa07f14aa2072f2c9961c324f6d2abea85b37253233" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.366020 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"] Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.375588 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6dzs7"] Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.391389 4689 scope.go:117] "RemoveContainer" containerID="6faaec8906a811ee7dd42eaac9b3be16bb40363d91cb511949425e8b5b40ad04" Jan 23 11:42:25 crc kubenswrapper[4689]: I0123 11:42:25.655518 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" path="/var/lib/kubelet/pods/3c9df894-69b8-4ec0-991f-d95f0527c755/volumes" Jan 23 11:42:29 crc kubenswrapper[4689]: I0123 11:42:29.345631 4689 scope.go:117] "RemoveContainer" containerID="7df92ad992e3c601cf5787e19bc30707c45d2bc072ef2374dfe736fa92a0f2d9" Jan 23 11:42:29 crc kubenswrapper[4689]: I0123 11:42:29.370938 4689 scope.go:117] "RemoveContainer" containerID="543315af247aba4ebffc82531a8b091bec2bd87e3460681c037274b942cd9f3e" Jan 23 11:42:29 crc kubenswrapper[4689]: I0123 11:42:29.467174 4689 scope.go:117] "RemoveContainer" containerID="d50f3a6f8d4502f6191f9860812f422f1ccbb52a59f86137860d7cee789aeb15" Jan 23 11:42:42 crc kubenswrapper[4689]: I0123 11:42:42.575372 4689 generic.go:334] "Generic (PLEG): container finished" podID="76d15661-afac-4fd0-8c0c-bb3ab33b2c29" containerID="348f33757621f43c122d6dcb2a1436528ccffb3a461cce85151abb1ed1354ebd" exitCode=0 Jan 23 11:42:42 crc kubenswrapper[4689]: I0123 11:42:42.575544 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" 
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.095643 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.240029 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.240805 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.240965 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.241093 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.241241 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.241305 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lj94f\" (UniqueName: \"kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.241372 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1\") pod \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\" (UID: \"76d15661-afac-4fd0-8c0c-bb3ab33b2c29\") "
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.247879 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.253346 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f" (OuterVolumeSpecName: "kube-api-access-lj94f") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "kube-api-access-lj94f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.276284 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.279538 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.281202 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.286270 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory" (OuterVolumeSpecName: "inventory") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.293825 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "76d15661-afac-4fd0-8c0c-bb3ab33b2c29" (UID: "76d15661-afac-4fd0-8c0c-bb3ab33b2c29"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344010 4689 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344046 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lj94f\" (UniqueName: \"kubernetes.io/projected/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-kube-api-access-lj94f\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344059 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344068 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344077 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344085 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.344095 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76d15661-afac-4fd0-8c0c-bb3ab33b2c29-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.602672 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b" event={"ID":"76d15661-afac-4fd0-8c0c-bb3ab33b2c29","Type":"ContainerDied","Data":"a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215"} Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.603258 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a108e0fcebc54c4b124e4f87da9aa1f29ed3c3223c9ac88d5cae46377d24e215" Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.602765 4689 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.721440 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"]
Jan 23 11:42:44 crc kubenswrapper[4689]: E0123 11:42:44.722120 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="extract-content"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722172 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="extract-content"
Jan 23 11:42:44 crc kubenswrapper[4689]: E0123 11:42:44.722214 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76d15661-afac-4fd0-8c0c-bb3ab33b2c29" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722229 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="76d15661-afac-4fd0-8c0c-bb3ab33b2c29" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:42:44 crc kubenswrapper[4689]: E0123 11:42:44.722274 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="registry-server"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722288 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="registry-server"
Jan 23 11:42:44 crc kubenswrapper[4689]: E0123 11:42:44.722322 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="extract-utilities"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722335 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="extract-utilities"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722669 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c9df894-69b8-4ec0-991f-d95f0527c755" containerName="registry-server"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.722730 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="76d15661-afac-4fd0-8c0c-bb3ab33b2c29" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.725541 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.729254 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.729365 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.729669 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.729828 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-xhqv8"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.730015 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.736420 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"]
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.857044 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.857133 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7qwb\" (UniqueName: \"kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.857192 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.857218 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.857280 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.959140 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.959213 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.959248 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.959411 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.959470 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7qwb\" (UniqueName: \"kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.964244 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.965464 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.966984 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.966998 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:44 crc kubenswrapper[4689]: I0123 11:42:44.978302 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7qwb\" (UniqueName: \"kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-llm26\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:45 crc kubenswrapper[4689]: I0123 11:42:45.043035 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:42:45 crc kubenswrapper[4689]: I0123 11:42:45.660728 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"]
Jan 23 11:42:46 crc kubenswrapper[4689]: I0123 11:42:46.628566 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26" event={"ID":"92f7b41c-45c5-4ad7-b7af-7459d727e982","Type":"ContainerStarted","Data":"fb33764f58104780304402c3b4dc620156b05c88623378e63f91b8ba70b4ae9d"}
Jan 23 11:42:46 crc kubenswrapper[4689]: I0123 11:42:46.629329 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26" event={"ID":"92f7b41c-45c5-4ad7-b7af-7459d727e982","Type":"ContainerStarted","Data":"9f392838357cb0a5fef005a463a401f8af39493b433fc29e1cb84e6c46dbff2f"}
Jan 23 11:42:46 crc kubenswrapper[4689]: I0123 11:42:46.661224 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26" podStartSLOduration=2.13470858 podStartE2EDuration="2.661201559s" podCreationTimestamp="2026-01-23 11:42:44 +0000 UTC" firstStartedPulling="2026-01-23 11:42:45.665093119 +0000 UTC m=+3230.289772998" lastFinishedPulling="2026-01-23 11:42:46.191586108 +0000 UTC m=+3230.816265977" observedRunningTime="2026-01-23 11:42:46.649169795 +0000 UTC m=+3231.273849684" watchObservedRunningTime="2026-01-23 11:42:46.661201559 +0000 UTC m=+3231.285881418"
Jan 23 11:43:01 crc kubenswrapper[4689]: I0123 11:43:01.844711 4689 generic.go:334] "Generic (PLEG): container finished" podID="92f7b41c-45c5-4ad7-b7af-7459d727e982" containerID="fb33764f58104780304402c3b4dc620156b05c88623378e63f91b8ba70b4ae9d" exitCode=0
Jan 23 11:43:01 crc kubenswrapper[4689]: I0123 11:43:01.845219 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26" event={"ID":"92f7b41c-45c5-4ad7-b7af-7459d727e982","Type":"ContainerDied","Data":"fb33764f58104780304402c3b4dc620156b05c88623378e63f91b8ba70b4ae9d"}
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.273734 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26"
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.427849 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0\") pod \"92f7b41c-45c5-4ad7-b7af-7459d727e982\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") "
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.427984 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam\") pod \"92f7b41c-45c5-4ad7-b7af-7459d727e982\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") "
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.428061 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1\") pod \"92f7b41c-45c5-4ad7-b7af-7459d727e982\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") "
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.428440 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7qwb\" (UniqueName: \"kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb\") pod \"92f7b41c-45c5-4ad7-b7af-7459d727e982\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") "
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.428629 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory\") pod \"92f7b41c-45c5-4ad7-b7af-7459d727e982\" (UID: \"92f7b41c-45c5-4ad7-b7af-7459d727e982\") "
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.435989 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb" (OuterVolumeSpecName: "kube-api-access-n7qwb") pod "92f7b41c-45c5-4ad7-b7af-7459d727e982" (UID: "92f7b41c-45c5-4ad7-b7af-7459d727e982"). InnerVolumeSpecName "kube-api-access-n7qwb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.479880 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "92f7b41c-45c5-4ad7-b7af-7459d727e982" (UID: "92f7b41c-45c5-4ad7-b7af-7459d727e982"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.485216 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "92f7b41c-45c5-4ad7-b7af-7459d727e982" (UID: "92f7b41c-45c5-4ad7-b7af-7459d727e982"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.496868 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory" (OuterVolumeSpecName: "inventory") pod "92f7b41c-45c5-4ad7-b7af-7459d727e982" (UID: "92f7b41c-45c5-4ad7-b7af-7459d727e982"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.497001 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "92f7b41c-45c5-4ad7-b7af-7459d727e982" (UID: "92f7b41c-45c5-4ad7-b7af-7459d727e982"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.545183 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7qwb\" (UniqueName: \"kubernetes.io/projected/92f7b41c-45c5-4ad7-b7af-7459d727e982-kube-api-access-n7qwb\") on node \"crc\" DevicePath \"\"" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.546030 4689 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-inventory\") on node \"crc\" DevicePath \"\"" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.546058 4689 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.546080 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.546099 4689 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/92f7b41c-45c5-4ad7-b7af-7459d727e982-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.880682 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-llm26" event={"ID":"92f7b41c-45c5-4ad7-b7af-7459d727e982","Type":"ContainerDied","Data":"9f392838357cb0a5fef005a463a401f8af39493b433fc29e1cb84e6c46dbff2f"} Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.880725 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f392838357cb0a5fef005a463a401f8af39493b433fc29e1cb84e6c46dbff2f" Jan 23 11:43:03 crc kubenswrapper[4689]: I0123 11:43:03.880804 4689 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 11:44:33 crc kubenswrapper[4689]: I0123 11:44:33.311576 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:44:33 crc kubenswrapper[4689]: I0123 11:44:33.312238 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.165999 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"]
Jan 23 11:45:00 crc kubenswrapper[4689]: E0123 11:45:00.167192 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f7b41c-45c5-4ad7-b7af-7459d727e982" containerName="logging-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.167210 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f7b41c-45c5-4ad7-b7af-7459d727e982" containerName="logging-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.167528 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="92f7b41c-45c5-4ad7-b7af-7459d727e982" containerName="logging-edpm-deployment-openstack-edpm-ipam"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.168510 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.172320 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.179084 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.194625 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"]
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.307732 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.308753 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.309002 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s67x2\" (UniqueName: \"kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.411508 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.411650 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.411834 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s67x2\" (UniqueName: \"kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.412427 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"
\"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.419870 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.430480 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s67x2\" (UniqueName: \"kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2\") pod \"collect-profiles-29486145-qqtmd\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.508061 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:00 crc kubenswrapper[4689]: I0123 11:45:00.981753 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd"] Jan 23 11:45:01 crc kubenswrapper[4689]: I0123 11:45:01.398159 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" event={"ID":"7463a223-0829-40b7-a389-e6a68212f828","Type":"ContainerStarted","Data":"ab95b9bf9606ec26bfdd485043f74bebfe45614b1911bf5bdd7f74429ba0c577"} Jan 23 11:45:01 crc kubenswrapper[4689]: I0123 11:45:01.398216 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" event={"ID":"7463a223-0829-40b7-a389-e6a68212f828","Type":"ContainerStarted","Data":"655b9f8364b47e4d40a5dc69837b288948fda4545b5b128e090c297506d6c7d8"} Jan 23 11:45:01 crc kubenswrapper[4689]: I0123 11:45:01.426662 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" podStartSLOduration=1.426644297 podStartE2EDuration="1.426644297s" podCreationTimestamp="2026-01-23 11:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 11:45:01.421641414 +0000 UTC m=+3366.046321273" watchObservedRunningTime="2026-01-23 11:45:01.426644297 +0000 UTC m=+3366.051324156" Jan 23 11:45:02 crc kubenswrapper[4689]: I0123 11:45:02.413900 4689 generic.go:334] "Generic (PLEG): container finished" podID="7463a223-0829-40b7-a389-e6a68212f828" containerID="ab95b9bf9606ec26bfdd485043f74bebfe45614b1911bf5bdd7f74429ba0c577" exitCode=0 Jan 23 11:45:02 crc kubenswrapper[4689]: I0123 11:45:02.413998 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" event={"ID":"7463a223-0829-40b7-a389-e6a68212f828","Type":"ContainerDied","Data":"ab95b9bf9606ec26bfdd485043f74bebfe45614b1911bf5bdd7f74429ba0c577"} Jan 23 11:45:03 crc kubenswrapper[4689]: I0123 11:45:03.310670 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:45:03 crc kubenswrapper[4689]: I0123 11:45:03.310946 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:45:03 crc kubenswrapper[4689]: I0123 11:45:03.917594 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.044802 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s67x2\" (UniqueName: \"kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2\") pod \"7463a223-0829-40b7-a389-e6a68212f828\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.045148 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume\") pod \"7463a223-0829-40b7-a389-e6a68212f828\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.045336 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume\") pod \"7463a223-0829-40b7-a389-e6a68212f828\" (UID: \"7463a223-0829-40b7-a389-e6a68212f828\") " Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.045838 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume" (OuterVolumeSpecName: "config-volume") pod "7463a223-0829-40b7-a389-e6a68212f828" (UID: "7463a223-0829-40b7-a389-e6a68212f828"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.046524 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7463a223-0829-40b7-a389-e6a68212f828-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.053410 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7463a223-0829-40b7-a389-e6a68212f828" (UID: "7463a223-0829-40b7-a389-e6a68212f828"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.065799 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2" (OuterVolumeSpecName: "kube-api-access-s67x2") pod "7463a223-0829-40b7-a389-e6a68212f828" (UID: "7463a223-0829-40b7-a389-e6a68212f828"). InnerVolumeSpecName "kube-api-access-s67x2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.151691 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s67x2\" (UniqueName: \"kubernetes.io/projected/7463a223-0829-40b7-a389-e6a68212f828-kube-api-access-s67x2\") on node \"crc\" DevicePath \"\"" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.151731 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7463a223-0829-40b7-a389-e6a68212f828-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.446923 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" event={"ID":"7463a223-0829-40b7-a389-e6a68212f828","Type":"ContainerDied","Data":"655b9f8364b47e4d40a5dc69837b288948fda4545b5b128e090c297506d6c7d8"} Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.446963 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="655b9f8364b47e4d40a5dc69837b288948fda4545b5b128e090c297506d6c7d8" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.447068 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486145-qqtmd" Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.503566 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28"] Jan 23 11:45:04 crc kubenswrapper[4689]: I0123 11:45:04.515781 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486100-zgc28"] Jan 23 11:45:05 crc kubenswrapper[4689]: I0123 11:45:05.665473 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b" path="/var/lib/kubelet/pods/0bb7b5e2-2db1-41a4-9e83-aafb7b2d928b/volumes" Jan 23 11:45:29 crc kubenswrapper[4689]: I0123 11:45:29.723331 4689 scope.go:117] "RemoveContainer" containerID="29da1e0721d15f43f6dfc43aa8e1186fee507ee59229a0ac5c8ca295f8383100" Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.311358 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.311966 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.312016 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.312986 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed 
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.312986 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.313062 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae" gracePeriod=600
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.823971 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae" exitCode=0
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.824202 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae"}
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.824533 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"}
Jan 23 11:45:33 crc kubenswrapper[4689]: I0123 11:45:33.824565 4689 scope.go:117] "RemoveContainer" containerID="a78f86d0ead4d0b1546a9dfd18d6d855fd2cb42fe572dc21979831a4b02e9f91"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.487303 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:45:52 crc kubenswrapper[4689]: E0123 11:45:52.488552 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7463a223-0829-40b7-a389-e6a68212f828" containerName="collect-profiles"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.488575 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="7463a223-0829-40b7-a389-e6a68212f828" containerName="collect-profiles"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.489010 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="7463a223-0829-40b7-a389-e6a68212f828" containerName="collect-profiles"
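The sequence above is the liveness-probe contract end to end: the HTTP GET to 127.0.0.1:8798/health is refused, the probe is marked a failure, and the container is killed with its grace period and restarted. A minimal Go sketch of such an HTTP check, assuming the usual Kubernetes convention that a 200-399 status is success; this is an illustration, not kubelet's actual prober code:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness check: transport errors (such as
// "connection refused" above) and statuses outside 200-399 count as failure.
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}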
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.493982 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.514453 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.553396 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gpjp\" (UniqueName: \"kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.554112 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.554396 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.656415 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.656512 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.656624 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gpjp\" (UniqueName: \"kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.657189 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.657193 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.695992 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gpjp\" (UniqueName: \"kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp\") pod \"certified-operators-kwk9s\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") " pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:52 crc kubenswrapper[4689]: I0123 11:45:52.828877 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:45:53 crc kubenswrapper[4689]: I0123 11:45:53.438068 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:45:54 crc kubenswrapper[4689]: I0123 11:45:54.127868 4689 generic.go:334] "Generic (PLEG): container finished" podID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerID="8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05" exitCode=0
Jan 23 11:45:54 crc kubenswrapper[4689]: I0123 11:45:54.127941 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerDied","Data":"8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05"}
Jan 23 11:45:54 crc kubenswrapper[4689]: I0123 11:45:54.128187 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerStarted","Data":"f0d9dd1d5d5d9a53c65716f7f143654087a61f9cc712a226aaadb1bde648a403"}
Jan 23 11:45:54 crc kubenswrapper[4689]: I0123 11:45:54.130959 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 11:45:56 crc kubenswrapper[4689]: I0123 11:45:56.155438 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerStarted","Data":"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"}
Jan 23 11:45:57 crc kubenswrapper[4689]: I0123 11:45:57.175283 4689 generic.go:334] "Generic (PLEG): container finished" podID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerID="f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265" exitCode=0
Jan 23 11:45:57 crc kubenswrapper[4689]: I0123 11:45:57.175327 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerDied","Data":"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"}
Jan 23 11:45:58 crc kubenswrapper[4689]: I0123 11:45:58.185943 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerStarted","Data":"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"}
Jan 23 11:45:58 crc kubenswrapper[4689]: I0123 11:45:58.216608 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kwk9s" podStartSLOduration=2.757458867 podStartE2EDuration="6.216592474s" podCreationTimestamp="2026-01-23 11:45:52 +0000 UTC" firstStartedPulling="2026-01-23 11:45:54.130756415 +0000 UTC m=+3418.755436284" lastFinishedPulling="2026-01-23 11:45:57.589890012 +0000 UTC m=+3422.214569891" observedRunningTime="2026-01-23 11:45:58.21402132 +0000 UTC m=+3422.838701179" watchObservedRunningTime="2026-01-23 11:45:58.216592474 +0000 UTC m=+3422.841272333"
Jan 23 11:46:02 crc kubenswrapper[4689]: I0123 11:46:02.829207 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:02 crc kubenswrapper[4689]: I0123 11:46:02.829658 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:02 crc kubenswrapper[4689]: I0123 11:46:02.880856 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:03 crc kubenswrapper[4689]: I0123 11:46:03.313048 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:03 crc kubenswrapper[4689]: I0123 11:46:03.376321 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.280607 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kwk9s" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="registry-server" containerID="cri-o://ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8" gracePeriod=2
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.894573 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.979815 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gpjp\" (UniqueName: \"kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp\") pod \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") "
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.979943 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities\") pod \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") "
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.980034 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content\") pod \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\" (UID: \"155eb6e4-2deb-4a94-bf4b-c87f4d637193\") "
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.981744 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities" (OuterVolumeSpecName: "utilities") pod "155eb6e4-2deb-4a94-bf4b-c87f4d637193" (UID: "155eb6e4-2deb-4a94-bf4b-c87f4d637193"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:46:05 crc kubenswrapper[4689]: I0123 11:46:05.986918 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp" (OuterVolumeSpecName: "kube-api-access-9gpjp") pod "155eb6e4-2deb-4a94-bf4b-c87f4d637193" (UID: "155eb6e4-2deb-4a94-bf4b-c87f4d637193"). InnerVolumeSpecName "kube-api-access-9gpjp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.020637 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "155eb6e4-2deb-4a94-bf4b-c87f4d637193" (UID: "155eb6e4-2deb-4a94-bf4b-c87f4d637193"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.083671 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.083731 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/155eb6e4-2deb-4a94-bf4b-c87f4d637193-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.083749 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gpjp\" (UniqueName: \"kubernetes.io/projected/155eb6e4-2deb-4a94-bf4b-c87f4d637193-kube-api-access-9gpjp\") on node \"crc\" DevicePath \"\""
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.299547 4689 generic.go:334] "Generic (PLEG): container finished" podID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerID="ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8" exitCode=0
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.299625 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerDied","Data":"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"}
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.299667 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kwk9s" event={"ID":"155eb6e4-2deb-4a94-bf4b-c87f4d637193","Type":"ContainerDied","Data":"f0d9dd1d5d5d9a53c65716f7f143654087a61f9cc712a226aaadb1bde648a403"}
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.299692 4689 scope.go:117] "RemoveContainer" containerID="ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"
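The UnmountVolume/TearDown/"Volume detached" entries above come out of kubelet's volume reconciler: once the pod is deleted, volumes present in the actual state of the world but absent from the desired state are unmounted. A toy Go sketch of that diff, with illustrative names and types rather than kubelet's own:

package main

import "fmt"

// volumesToUnmount returns volumes that are still mounted (actual) but no
// longer wanted (desired), the condition that triggers UnmountVolume above.
func volumesToUnmount(desired, actual map[string]bool) []string {
	var pending []string
	for vol := range actual {
		if !desired[vol] {
			pending = append(pending, vol)
		}
	}
	return pending
}

func main() {
	desired := map[string]bool{} // pod deleted: nothing is desired anymore
	actual := map[string]bool{
		"kube-api-access-9gpjp": true,
		"utilities":             true,
		"catalog-content":       true,
	}
	for _, vol := range volumesToUnmount(desired, actual) {
		fmt.Println("unmount needed for volume", vol)
	}
}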
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.299947 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kwk9s"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.351120 4689 scope.go:117] "RemoveContainer" containerID="f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.355433 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.374756 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kwk9s"]
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.393384 4689 scope.go:117] "RemoveContainer" containerID="8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.446946 4689 scope.go:117] "RemoveContainer" containerID="ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"
Jan 23 11:46:06 crc kubenswrapper[4689]: E0123 11:46:06.447607 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8\": container with ID starting with ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8 not found: ID does not exist" containerID="ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.447670 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8"} err="failed to get container status \"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8\": rpc error: code = NotFound desc = could not find container \"ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8\": container with ID starting with ac340297a638ed288c0b4a714e10e38b504131e37c2b0233d34f3748eb486de8 not found: ID does not exist"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.447711 4689 scope.go:117] "RemoveContainer" containerID="f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"
Jan 23 11:46:06 crc kubenswrapper[4689]: E0123 11:46:06.448214 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265\": container with ID starting with f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265 not found: ID does not exist" containerID="f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.448248 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265"} err="failed to get container status \"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265\": rpc error: code = NotFound desc = could not find container \"f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265\": container with ID starting with f732e47c9db3a30501b0679924cb774beff9d90a1ea5e41683b7a28aa9cb3265 not found: ID does not exist"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.448272 4689 scope.go:117] "RemoveContainer" containerID="8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05"
Jan 23 11:46:06 crc kubenswrapper[4689]: E0123 11:46:06.448664 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05\": container with ID starting with 8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05 not found: ID does not exist" containerID="8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05"
Jan 23 11:46:06 crc kubenswrapper[4689]: I0123 11:46:06.448697 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05"} err="failed to get container status \"8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05\": rpc error: code = NotFound desc = could not find container \"8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05\": container with ID starting with 8853f38a40e272987a186c2bbcf98339d2d6bae107c760903c14871c1b4e0a05 not found: ID does not exist"
Jan 23 11:46:07 crc kubenswrapper[4689]: I0123 11:46:07.659765 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" path="/var/lib/kubelet/pods/155eb6e4-2deb-4a94-bf4b-c87f4d637193/volumes"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.098108 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:46:38 crc kubenswrapper[4689]: E0123 11:46:38.100188 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="extract-content"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.100222 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="extract-content"
Jan 23 11:46:38 crc kubenswrapper[4689]: E0123 11:46:38.100246 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="registry-server"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.100259 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="registry-server"
Jan 23 11:46:38 crc kubenswrapper[4689]: E0123 11:46:38.100292 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="extract-utilities"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.100307 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="extract-utilities"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.100780 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="155eb6e4-2deb-4a94-bf4b-c87f4d637193" containerName="registry-server"
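The "code = NotFound" errors above are benign: the containers were already removed, so kubelet logs the failed status lookup and treats the deletion as done rather than retrying. A sketch of that idempotent-delete pattern in Go; the runtime interface and errNotFound sentinel are hypothetical stand-ins for the CRI NotFound case:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the CRI "code = NotFound" error seen above.
var errNotFound = errors.New("container not found")

// removeContainer treats a NotFound result as success: a container that is
// already gone leaves nothing to delete.
func removeContainer(id string, remove func(string) error) error {
	if err := remove(id); err != nil {
		if errors.Is(err, errNotFound) {
			return nil // already removed elsewhere: delete is a no-op
		}
		return err
	}
	return nil
}

func main() {
	alreadyGone := func(id string) error {
		return fmt.Errorf("rpc error: %w", errNotFound)
	}
	fmt.Println(removeContainer("ac340297a638ed28", alreadyGone)) // <nil>
}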
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.104274 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.139392 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.198702 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.199055 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cfvv\" (UniqueName: \"kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.199330 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.302521 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.302855 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cfvv\" (UniqueName: \"kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.302986 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.303530 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.303871 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.324741 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cfvv\" (UniqueName: \"kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv\") pod \"redhat-operators-rh229\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") " pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.454309 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:38 crc kubenswrapper[4689]: I0123 11:46:38.944309 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:46:39 crc kubenswrapper[4689]: I0123 11:46:39.019414 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerStarted","Data":"380ee05d5dec6046ac84b2c9ec6baa478fd6ca1fca05e4fa726d82a5428ee307"}
Jan 23 11:46:40 crc kubenswrapper[4689]: I0123 11:46:40.033560 4689 generic.go:334] "Generic (PLEG): container finished" podID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerID="48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59" exitCode=0
Jan 23 11:46:40 crc kubenswrapper[4689]: I0123 11:46:40.033884 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerDied","Data":"48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59"}
Jan 23 11:46:41 crc kubenswrapper[4689]: I0123 11:46:41.048195 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerStarted","Data":"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"}
Jan 23 11:46:44 crc kubenswrapper[4689]: E0123 11:46:44.910424 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod600e3384_5b92_4e60_a355_22cfe39da0b0.slice/crio-conmon-cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod600e3384_5b92_4e60_a355_22cfe39da0b0.slice/crio-cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 11:46:45 crc kubenswrapper[4689]: I0123 11:46:45.098566 4689 generic.go:334] "Generic (PLEG): container finished" podID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerID="cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3" exitCode=0
Jan 23 11:46:45 crc kubenswrapper[4689]: I0123 11:46:45.098672 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerDied","Data":"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"}
Jan 23 11:46:46 crc kubenswrapper[4689]: I0123 11:46:46.109991 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerStarted","Data":"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"}
Jan 23 11:46:46 crc kubenswrapper[4689]: I0123 11:46:46.136764 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rh229" podStartSLOduration=2.600846308 podStartE2EDuration="8.136745349s" podCreationTimestamp="2026-01-23 11:46:38 +0000 UTC" firstStartedPulling="2026-01-23 11:46:40.035834388 +0000 UTC m=+3464.660514287" lastFinishedPulling="2026-01-23 11:46:45.571733459 +0000 UTC m=+3470.196413328" observedRunningTime="2026-01-23 11:46:46.125196304 +0000 UTC m=+3470.749876173" watchObservedRunningTime="2026-01-23 11:46:46.136745349 +0000 UTC m=+3470.761425208"
Jan 23 11:46:48 crc kubenswrapper[4689]: I0123 11:46:48.454849 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:48 crc kubenswrapper[4689]: I0123 11:46:48.456308 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:49 crc kubenswrapper[4689]: I0123 11:46:49.530764 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rh229" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="registry-server" probeResult="failure" output=<
Jan 23 11:46:49 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s
Jan 23 11:46:49 crc kubenswrapper[4689]: >
Jan 23 11:46:58 crc kubenswrapper[4689]: I0123 11:46:58.538054 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:58 crc kubenswrapper[4689]: I0123 11:46:58.609086 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:46:58 crc kubenswrapper[4689]: I0123 11:46:58.776861 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.276617 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rh229" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="registry-server" containerID="cri-o://73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df" gracePeriod=2
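The startup probe failure above ("timeout: failed to connect service \":50051\" within 1s") is the registry-server's health check against its gRPC port not answering within the 1s deadline; it succeeds on the next attempt. A Go sketch of the connect-within-timeout shape of that check; the real probe appears to be a gRPC health check, so the plain TCP dial and the mimicked message format here are illustrative assumptions:

package main

import (
	"fmt"
	"net"
	"time"
)

// dialWithin fails if the address does not accept a TCP connection within
// the deadline, the same shape as the startup probe output above.
func dialWithin(addr string, d time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, d)
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within %s", addr, d)
	}
	return conn.Close()
}

func main() {
	if err := dialWithin(":50051", time.Second); err != nil {
		fmt.Println(err)
	}
}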
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.830323 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.955069 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities\") pod \"600e3384-5b92-4e60-a355-22cfe39da0b0\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") "
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.955578 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cfvv\" (UniqueName: \"kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv\") pod \"600e3384-5b92-4e60-a355-22cfe39da0b0\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") "
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.955631 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content\") pod \"600e3384-5b92-4e60-a355-22cfe39da0b0\" (UID: \"600e3384-5b92-4e60-a355-22cfe39da0b0\") "
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.957613 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities" (OuterVolumeSpecName: "utilities") pod "600e3384-5b92-4e60-a355-22cfe39da0b0" (UID: "600e3384-5b92-4e60-a355-22cfe39da0b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:47:00 crc kubenswrapper[4689]: I0123 11:47:00.968046 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv" (OuterVolumeSpecName: "kube-api-access-8cfvv") pod "600e3384-5b92-4e60-a355-22cfe39da0b0" (UID: "600e3384-5b92-4e60-a355-22cfe39da0b0"). InnerVolumeSpecName "kube-api-access-8cfvv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.057552 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.057582 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cfvv\" (UniqueName: \"kubernetes.io/projected/600e3384-5b92-4e60-a355-22cfe39da0b0-kube-api-access-8cfvv\") on node \"crc\" DevicePath \"\""
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.074593 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "600e3384-5b92-4e60-a355-22cfe39da0b0" (UID: "600e3384-5b92-4e60-a355-22cfe39da0b0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.162133 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/600e3384-5b92-4e60-a355-22cfe39da0b0-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.292448 4689 generic.go:334] "Generic (PLEG): container finished" podID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerID="73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df" exitCode=0
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.292522 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerDied","Data":"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"}
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.292610 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh229" event={"ID":"600e3384-5b92-4e60-a355-22cfe39da0b0","Type":"ContainerDied","Data":"380ee05d5dec6046ac84b2c9ec6baa478fd6ca1fca05e4fa726d82a5428ee307"}
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.292614 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh229"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.292725 4689 scope.go:117] "RemoveContainer" containerID="73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.334712 4689 scope.go:117] "RemoveContainer" containerID="cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.336848 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.348056 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rh229"]
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.373852 4689 scope.go:117] "RemoveContainer" containerID="48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.456615 4689 scope.go:117] "RemoveContainer" containerID="73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"
Jan 23 11:47:01 crc kubenswrapper[4689]: E0123 11:47:01.457296 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df\": container with ID starting with 73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df not found: ID does not exist" containerID="73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.457347 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df"} err="failed to get container status \"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df\": rpc error: code = NotFound desc = could not find container \"73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df\": container with ID starting with 73f14dd4c6206482d932cad62f9a54eb17ca219e03b5d47c3dcf735694d6c9df not found: ID does not exist"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.457385 4689 scope.go:117] "RemoveContainer" containerID="cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"
Jan 23 11:47:01 crc kubenswrapper[4689]: E0123 11:47:01.458085 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3\": container with ID starting with cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3 not found: ID does not exist" containerID="cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.458183 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3"} err="failed to get container status \"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3\": rpc error: code = NotFound desc = could not find container \"cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3\": container with ID starting with cd12db0d933b8bfb647fd31d56c7de7540b39aaae38c39bced6979a484553be3 not found: ID does not exist"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.458232 4689 scope.go:117] "RemoveContainer" containerID="48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59"
Jan 23 11:47:01 crc kubenswrapper[4689]: E0123 11:47:01.458734 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59\": container with ID starting with 48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59 not found: ID does not exist" containerID="48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.458785 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59"} err="failed to get container status \"48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59\": rpc error: code = NotFound desc = could not find container \"48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59\": container with ID starting with 48cefd5848b49c50c467fc98735c9abaa3dab0bb0429e212a0ecd8103989ec59 not found: ID does not exist"
Jan 23 11:47:01 crc kubenswrapper[4689]: I0123 11:47:01.660340 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" path="/var/lib/kubelet/pods/600e3384-5b92-4e60-a355-22cfe39da0b0/volumes"
Jan 23 11:47:33 crc kubenswrapper[4689]: I0123 11:47:33.311593 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:47:33 crc kubenswrapper[4689]: I0123 11:47:33.312447 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:48:03 crc kubenswrapper[4689]: I0123 11:48:03.310625 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:48:03 crc kubenswrapper[4689]: I0123 11:48:03.311397 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:48:33 crc kubenswrapper[4689]: I0123 11:48:33.310522 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:48:33 crc kubenswrapper[4689]: I0123 11:48:33.310990 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:48:33 crc kubenswrapper[4689]: I0123 11:48:33.311035 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf"
Jan 23 11:48:33 crc kubenswrapper[4689]: I0123 11:48:33.311836 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 11:48:33 crc kubenswrapper[4689]: I0123 11:48:33.311882 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948" gracePeriod=600
Jan 23 11:48:33 crc kubenswrapper[4689]: E0123 11:48:33.435362 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:48:34 crc kubenswrapper[4689]: I0123 11:48:34.124367 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948" exitCode=0
Jan 23 11:48:34 crc kubenswrapper[4689]: I0123 11:48:34.124791 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"}
Jan 23 11:48:34 crc kubenswrapper[4689]: I0123 11:48:34.124946 4689 scope.go:117] "RemoveContainer" containerID="2723ddaa0db14adcf125ad4322632a5f67306159c50ef180df029272d4f2daae"
Jan 23 11:48:34 crc kubenswrapper[4689]: I0123 11:48:34.126991 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:48:34 crc kubenswrapper[4689]: E0123 11:48:34.128414 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:48:48 crc kubenswrapper[4689]: I0123 11:48:48.642105 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:48:48 crc kubenswrapper[4689]: E0123 11:48:48.643231 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:49:03 crc kubenswrapper[4689]: I0123 11:49:03.641331 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:49:03 crc kubenswrapper[4689]: E0123 11:49:03.642688 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:49:17 crc kubenswrapper[4689]: I0123 11:49:17.641176 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:49:17 crc kubenswrapper[4689]: E0123 11:49:17.642348 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:49:32 crc kubenswrapper[4689]: I0123 11:49:32.641061 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:49:32 crc kubenswrapper[4689]: E0123 11:49:32.641875 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:49:46 crc kubenswrapper[4689]: I0123 11:49:46.642213 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:49:46 crc kubenswrapper[4689]: E0123 11:49:46.643738 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:49:57 crc kubenswrapper[4689]: I0123 11:49:57.640297 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:49:57 crc kubenswrapper[4689]: E0123 11:49:57.641559 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.133639 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:02 crc kubenswrapper[4689]: E0123 11:50:02.134716 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="registry-server"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.134733 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="registry-server"
Jan 23 11:50:02 crc kubenswrapper[4689]: E0123 11:50:02.134772 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="extract-utilities"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.134782 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="extract-utilities"
Jan 23 11:50:02 crc kubenswrapper[4689]: E0123 11:50:02.134794 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="extract-content"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.134805 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="extract-content"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.135074 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="600e3384-5b92-4e60-a355-22cfe39da0b0" containerName="registry-server"
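The repeated "back-off 5m0s" errors above are CrashLoopBackOff at its cap: kubelet delays each restart of a failing container by a doubling backoff until it hits the maximum, which is why every retry in this stretch reports the same 5m0s. A Go sketch of that schedule; the 10s base and 5m cap are the well-known kubelet defaults, assumed here rather than read from this cluster's configuration:

package main

import (
	"fmt"
	"time"
)

// backoffDelay doubles the restart delay per failed restart and caps it,
// matching the shape of the CrashLoopBackOff entries above.
func backoffDelay(restarts int) time.Duration {
	const (
		base     = 10 * time.Second // assumed kubelet default
		maxDelay = 5 * time.Minute  // assumed kubelet default (the "5m0s" above)
	)
	d := base
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for i := 0; i <= 6; i++ {
		fmt.Printf("restart %d -> wait %s\n", i, backoffDelay(i))
	}
}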
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.137010 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.181392 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.258270 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.258389 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vttlh\" (UniqueName: \"kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.258427 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.360789 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vttlh\" (UniqueName: \"kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.360862 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.361115 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.361554 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.361610 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.400135 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vttlh\" (UniqueName: \"kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh\") pod \"community-operators-wslrd\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") " pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:02 crc kubenswrapper[4689]: I0123 11:50:02.476067 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:03 crc kubenswrapper[4689]: I0123 11:50:03.103390 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:03 crc kubenswrapper[4689]: W0123 11:50:03.109520 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod255d4930_6cbb_4b98_8518_fe2aa4efec56.slice/crio-f1a62352b49f5617221191acec8f88c5ac800d225ebc433f58b2d5244eb13239 WatchSource:0}: Error finding container f1a62352b49f5617221191acec8f88c5ac800d225ebc433f58b2d5244eb13239: Status 404 returned error can't find the container with id f1a62352b49f5617221191acec8f88c5ac800d225ebc433f58b2d5244eb13239
Jan 23 11:50:03 crc kubenswrapper[4689]: I0123 11:50:03.279993 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerStarted","Data":"f1a62352b49f5617221191acec8f88c5ac800d225ebc433f58b2d5244eb13239"}
Jan 23 11:50:04 crc kubenswrapper[4689]: I0123 11:50:04.288995 4689 generic.go:334] "Generic (PLEG): container finished" podID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerID="6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c" exitCode=0
Jan 23 11:50:04 crc kubenswrapper[4689]: I0123 11:50:04.289088 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerDied","Data":"6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c"}
Jan 23 11:50:05 crc kubenswrapper[4689]: I0123 11:50:05.316948 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerStarted","Data":"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"}
Jan 23 11:50:07 crc kubenswrapper[4689]: I0123 11:50:07.345728 4689 generic.go:334] "Generic (PLEG): container finished" podID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerID="74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3" exitCode=0
Jan 23 11:50:07 crc kubenswrapper[4689]: I0123 11:50:07.345813 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerDied","Data":"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"}
Jan 23 11:50:08 crc kubenswrapper[4689]: I0123 11:50:08.361676 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerStarted","Data":"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"}
Jan 23 11:50:08 crc kubenswrapper[4689]: I0123 11:50:08.389253 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wslrd" podStartSLOduration=2.897923439 podStartE2EDuration="6.389230598s" podCreationTimestamp="2026-01-23 11:50:02 +0000 UTC" firstStartedPulling="2026-01-23 11:50:04.290909352 +0000 UTC m=+3668.915589211" lastFinishedPulling="2026-01-23 11:50:07.782216511 +0000 UTC m=+3672.406896370" observedRunningTime="2026-01-23 11:50:08.379489228 +0000 UTC m=+3673.004169097" watchObservedRunningTime="2026-01-23 11:50:08.389230598 +0000 UTC m=+3673.013910467"
Jan 23 11:50:08 crc kubenswrapper[4689]: I0123 11:50:08.640992 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:50:08 crc kubenswrapper[4689]: E0123 11:50:08.641672 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:50:12 crc kubenswrapper[4689]: I0123 11:50:12.478052 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:12 crc kubenswrapper[4689]: I0123 11:50:12.478707 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:12 crc kubenswrapper[4689]: I0123 11:50:12.571257 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:13 crc kubenswrapper[4689]: I0123 11:50:13.522698 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:14 crc kubenswrapper[4689]: I0123 11:50:14.718281 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:15 crc kubenswrapper[4689]: I0123 11:50:15.457670 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wslrd" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="registry-server" containerID="cri-o://84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c" gracePeriod=2
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.080570 4689 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.154096 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vttlh\" (UniqueName: \"kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh\") pod \"255d4930-6cbb-4b98-8518-fe2aa4efec56\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") "
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.154416 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content\") pod \"255d4930-6cbb-4b98-8518-fe2aa4efec56\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") "
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.154539 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities\") pod \"255d4930-6cbb-4b98-8518-fe2aa4efec56\" (UID: \"255d4930-6cbb-4b98-8518-fe2aa4efec56\") "
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.155820 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities" (OuterVolumeSpecName: "utilities") pod "255d4930-6cbb-4b98-8518-fe2aa4efec56" (UID: "255d4930-6cbb-4b98-8518-fe2aa4efec56"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.163446 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh" (OuterVolumeSpecName: "kube-api-access-vttlh") pod "255d4930-6cbb-4b98-8518-fe2aa4efec56" (UID: "255d4930-6cbb-4b98-8518-fe2aa4efec56"). InnerVolumeSpecName "kube-api-access-vttlh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.221477 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "255d4930-6cbb-4b98-8518-fe2aa4efec56" (UID: "255d4930-6cbb-4b98-8518-fe2aa4efec56"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.257409 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.257701 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vttlh\" (UniqueName: \"kubernetes.io/projected/255d4930-6cbb-4b98-8518-fe2aa4efec56-kube-api-access-vttlh\") on node \"crc\" DevicePath \"\""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.257715 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/255d4930-6cbb-4b98-8518-fe2aa4efec56-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.479677 4689 generic.go:334] "Generic (PLEG): container finished" podID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerID="84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c" exitCode=0
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.479745 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerDied","Data":"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"}
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.479785 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wslrd" event={"ID":"255d4930-6cbb-4b98-8518-fe2aa4efec56","Type":"ContainerDied","Data":"f1a62352b49f5617221191acec8f88c5ac800d225ebc433f58b2d5244eb13239"}
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.479813 4689 scope.go:117] "RemoveContainer" containerID="84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.479998 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wslrd"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.524518 4689 scope.go:117] "RemoveContainer" containerID="74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.545572 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.555605 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wslrd"]
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.611835 4689 scope.go:117] "RemoveContainer" containerID="6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.641932 4689 scope.go:117] "RemoveContainer" containerID="84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"
Jan 23 11:50:16 crc kubenswrapper[4689]: E0123 11:50:16.642455 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c\": container with ID starting with 84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c not found: ID does not exist" containerID="84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.642563 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c"} err="failed to get container status \"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c\": rpc error: code = NotFound desc = could not find container \"84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c\": container with ID starting with 84d8de76587b07db71388c8c3b44e235f9c5fe5088ee9de488e75faeff5ef95c not found: ID does not exist"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.642622 4689 scope.go:117] "RemoveContainer" containerID="74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"
Jan 23 11:50:16 crc kubenswrapper[4689]: E0123 11:50:16.643291 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3\": container with ID starting with 74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3 not found: ID does not exist" containerID="74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.643321 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3"} err="failed to get container status \"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3\": rpc error: code = NotFound desc = could not find container \"74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3\": container with ID starting with 74c574344f3f71d39fdc30ac0001440423bde7ae02d645f61bcc5f62ada34ad3 not found: ID does not exist"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.643342 4689 scope.go:117] "RemoveContainer" containerID="6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c"
Jan 23 11:50:16 crc kubenswrapper[4689]: E0123 11:50:16.643928 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c\": container with ID starting with 6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c not found: ID does not exist" containerID="6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c"
Jan 23 11:50:16 crc kubenswrapper[4689]: I0123 11:50:16.644234 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c"} err="failed to get container status \"6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c\": rpc error: code = NotFound desc = could not find container \"6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c\": container with ID starting with 6ab19cf37ad4d4accf211a4656042a6724d9007dd4c1325fb625ec76447e2b0c not found: ID does not exist"
Jan 23 11:50:17 crc kubenswrapper[4689]: I0123 11:50:17.651188 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" path="/var/lib/kubelet/pods/255d4930-6cbb-4b98-8518-fe2aa4efec56/volumes"
Jan 23 11:50:20 crc kubenswrapper[4689]: I0123 11:50:20.640786 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:50:20 crc kubenswrapper[4689]: E0123 11:50:20.641661 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:50:35 crc kubenswrapper[4689]: I0123 11:50:35.653670 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:50:35 crc kubenswrapper[4689]: E0123 11:50:35.654552 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:50:47 crc kubenswrapper[4689]: I0123 11:50:47.642801 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:50:47 crc kubenswrapper[4689]: E0123 11:50:47.644810 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:51:02 crc kubenswrapper[4689]: I0123 11:51:02.641386 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:51:02 crc kubenswrapper[4689]: E0123 11:51:02.642433 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:51:16 crc kubenswrapper[4689]: I0123 11:51:16.641064 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:51:16 crc kubenswrapper[4689]: E0123 11:51:16.645753 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:51:30 crc kubenswrapper[4689]: I0123 11:51:30.640969 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:51:30 crc kubenswrapper[4689]: E0123 11:51:30.641904 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:51:45 crc kubenswrapper[4689]: I0123 11:51:45.651750 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:51:45 crc kubenswrapper[4689]: E0123 11:51:45.652756 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:51:59 crc kubenswrapper[4689]: I0123 11:51:59.641336 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:51:59 crc kubenswrapper[4689]: E0123 11:51:59.644038 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:52:13 crc kubenswrapper[4689]: I0123 11:52:13.640517 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:52:13 crc kubenswrapper[4689]: E0123 11:52:13.641712 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:52:26 crc kubenswrapper[4689]: I0123 11:52:26.640490 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:52:26 crc kubenswrapper[4689]: E0123 11:52:26.643434 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:52:39 crc kubenswrapper[4689]: I0123 11:52:39.640351 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:52:39 crc kubenswrapper[4689]: E0123 11:52:39.641200 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.399407 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:41 crc kubenswrapper[4689]: E0123 11:52:41.400701 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="registry-server"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.400728 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="registry-server"
Jan 23 11:52:41 crc kubenswrapper[4689]: E0123 11:52:41.400763 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="extract-content"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.400775 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="extract-content"
Jan 23 11:52:41 crc kubenswrapper[4689]: E0123 11:52:41.400801 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="extract-utilities"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.400815 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="extract-utilities"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.401143 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="255d4930-6cbb-4b98-8518-fe2aa4efec56" containerName="registry-server"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.403357 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.424506 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.516371 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.516967 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkddt\" (UniqueName: \"kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.517108 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.619296 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.619429 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.619577 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkddt\" (UniqueName: \"kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.619801 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.620030 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.646776 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkddt\" (UniqueName: \"kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt\") pod \"redhat-marketplace-26wzm\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") " pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:41 crc kubenswrapper[4689]: I0123 11:52:41.738757 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:42 crc kubenswrapper[4689]: I0123 11:52:42.268425 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:42 crc kubenswrapper[4689]: W0123 11:52:42.280172 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238 WatchSource:0}: Error finding container 4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238: Status 404 returned error can't find the container with id 4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238
Jan 23 11:52:42 crc kubenswrapper[4689]: I0123 11:52:42.480145 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerStarted","Data":"4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238"}
Jan 23 11:52:43 crc kubenswrapper[4689]: I0123 11:52:43.497248 4689 generic.go:334] "Generic (PLEG): container finished" podID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerID="217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77" exitCode=0
Jan 23 11:52:43 crc kubenswrapper[4689]: I0123 11:52:43.497367 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerDied","Data":"217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77"}
Jan 23 11:52:43 crc kubenswrapper[4689]: I0123 11:52:43.502687 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 11:52:45 crc kubenswrapper[4689]: I0123 11:52:45.533717 4689 generic.go:334] "Generic (PLEG): container finished" podID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerID="6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820" exitCode=0
Jan 23 11:52:45 crc kubenswrapper[4689]: I0123 11:52:45.534315 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerDied","Data":"6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820"}
Jan 23 11:52:46 crc kubenswrapper[4689]: I0123 11:52:46.559451 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerStarted","Data":"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"}
Jan 23 11:52:46 crc kubenswrapper[4689]: I0123 11:52:46.613765 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-26wzm" podStartSLOduration=3.073859482 podStartE2EDuration="5.613737374s" podCreationTimestamp="2026-01-23 11:52:41 +0000 UTC" firstStartedPulling="2026-01-23 11:52:43.50025305 +0000 UTC m=+3828.124932919" lastFinishedPulling="2026-01-23 11:52:46.040130912 +0000 UTC m=+3830.664810811" observedRunningTime="2026-01-23 11:52:46.586235726 +0000 UTC m=+3831.210915595" watchObservedRunningTime="2026-01-23 11:52:46.613737374 +0000 UTC m=+3831.238417263"
Jan 23 11:52:51 crc kubenswrapper[4689]: I0123 11:52:51.739232 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:51 crc kubenswrapper[4689]: I0123 11:52:51.741100 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:51 crc kubenswrapper[4689]: I0123 11:52:51.816862 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:52 crc kubenswrapper[4689]: I0123 11:52:52.712774 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:52 crc kubenswrapper[4689]: I0123 11:52:52.763933 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:53 crc kubenswrapper[4689]: I0123 11:52:53.639990 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:52:53 crc kubenswrapper[4689]: E0123 11:52:53.640600 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:52:54 crc kubenswrapper[4689]: I0123 11:52:54.676772 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-26wzm" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="registry-server" containerID="cri-o://82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1" gracePeriod=2
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.263977 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.438410 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities\") pod \"40d9c3cb-7898-42ab-b612-3f9327680ade\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") "
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.438559 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkddt\" (UniqueName: \"kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt\") pod \"40d9c3cb-7898-42ab-b612-3f9327680ade\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") "
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.438760 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content\") pod \"40d9c3cb-7898-42ab-b612-3f9327680ade\" (UID: \"40d9c3cb-7898-42ab-b612-3f9327680ade\") "
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.441721 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities" (OuterVolumeSpecName: "utilities") pod "40d9c3cb-7898-42ab-b612-3f9327680ade" (UID: "40d9c3cb-7898-42ab-b612-3f9327680ade"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.450830 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt" (OuterVolumeSpecName: "kube-api-access-pkddt") pod "40d9c3cb-7898-42ab-b612-3f9327680ade" (UID: "40d9c3cb-7898-42ab-b612-3f9327680ade"). InnerVolumeSpecName "kube-api-access-pkddt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.464737 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40d9c3cb-7898-42ab-b612-3f9327680ade" (UID: "40d9c3cb-7898-42ab-b612-3f9327680ade"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.541673 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkddt\" (UniqueName: \"kubernetes.io/projected/40d9c3cb-7898-42ab-b612-3f9327680ade-kube-api-access-pkddt\") on node \"crc\" DevicePath \"\""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.542064 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.542073 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40d9c3cb-7898-42ab-b612-3f9327680ade-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.688685 4689 generic.go:334] "Generic (PLEG): container finished" podID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerID="82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1" exitCode=0
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.688724 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerDied","Data":"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"}
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.688752 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-26wzm" event={"ID":"40d9c3cb-7898-42ab-b612-3f9327680ade","Type":"ContainerDied","Data":"4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238"}
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.688770 4689 scope.go:117] "RemoveContainer" containerID="82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.688796 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-26wzm"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.721065 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.723763 4689 scope.go:117] "RemoveContainer" containerID="6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.735616 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-26wzm"]
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.745787 4689 scope.go:117] "RemoveContainer" containerID="217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.803679 4689 scope.go:117] "RemoveContainer" containerID="82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"
Jan 23 11:52:55 crc kubenswrapper[4689]: E0123 11:52:55.804202 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1\": container with ID starting with 82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1 not found: ID does not exist" containerID="82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.804243 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1"} err="failed to get container status \"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1\": rpc error: code = NotFound desc = could not find container \"82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1\": container with ID starting with 82c309b72f8969fdd3e565fd14fdb28a2fb6721b1a5b4989db9b11bb803c71e1 not found: ID does not exist"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.804275 4689 scope.go:117] "RemoveContainer" containerID="6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820"
Jan 23 11:52:55 crc kubenswrapper[4689]: E0123 11:52:55.804672 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820\": container with ID starting with 6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820 not found: ID does not exist" containerID="6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.804709 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820"} err="failed to get container status \"6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820\": rpc error: code = NotFound desc = could not find container \"6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820\": container with ID starting with 6063504b778aa8cebd16f5de4206b2c13f8418fd9e798dbd30a9f4db15923820 not found: ID does not exist"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.804732 4689 scope.go:117] "RemoveContainer" containerID="217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77"
Jan 23 11:52:55 crc kubenswrapper[4689]: E0123 11:52:55.805671 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77\": container with ID starting with 217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77 not found: ID does not exist" containerID="217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77"
Jan 23 11:52:55 crc kubenswrapper[4689]: I0123 11:52:55.805695 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77"} err="failed to get container status \"217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77\": rpc error: code = NotFound desc = could not find container \"217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77\": container with ID starting with 217726379deddf5188d04c44ecba49971ed8a1e2ab14fdb3a99645b4a0e6ee77 not found: ID does not exist"
Jan 23 11:52:57 crc kubenswrapper[4689]: I0123 11:52:57.659369 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" path="/var/lib/kubelet/pods/40d9c3cb-7898-42ab-b612-3f9327680ade/volumes"
Jan 23 11:53:02 crc kubenswrapper[4689]: E0123 11:53:02.960985 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:03 crc kubenswrapper[4689]: E0123 11:53:03.748924 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:05 crc kubenswrapper[4689]: I0123 11:53:05.668483 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:53:05 crc kubenswrapper[4689]: E0123 11:53:05.679781 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:53:14 crc kubenswrapper[4689]: E0123 11:53:14.067313 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:15 crc kubenswrapper[4689]: I0123 11:53:15.682643 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web"
Jan 23 11:53:17 crc kubenswrapper[4689]: E0123 11:53:17.963751 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:18 crc kubenswrapper[4689]: I0123 11:53:18.640642 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:53:18 crc kubenswrapper[4689]: E0123 11:53:18.641223 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:53:24 crc kubenswrapper[4689]: E0123 11:53:24.408845 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:30 crc kubenswrapper[4689]: I0123 11:53:30.640681 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:53:30 crc kubenswrapper[4689]: E0123 11:53:30.641839 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 11:53:33 crc kubenswrapper[4689]: E0123 11:53:33.252235 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:34 crc kubenswrapper[4689]: E0123 11:53:34.455539 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:43 crc kubenswrapper[4689]: I0123 11:53:43.640458 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948"
Jan 23 11:53:44 crc kubenswrapper[4689]: I0123 11:53:44.330756 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1"}
Jan 23 11:53:44 crc kubenswrapper[4689]: E0123 11:53:44.744849 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:47 crc kubenswrapper[4689]: E0123 11:53:47.967800 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:48 crc kubenswrapper[4689]: E0123 11:53:48.106377 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:48 crc kubenswrapper[4689]: E0123 11:53:48.106590 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:55 crc kubenswrapper[4689]: E0123 11:53:55.054921 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice/crio-4fa18bac42ed401f0c3f41fa7c40edc4ff3835d937150e959ed8b2dc29cf8238\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40d9c3cb_7898_42ab_b612_3f9327680ade.slice\": RecentStats: unable to find data in memory cache]"
Jan 23 11:53:55 crc kubenswrapper[4689]: E0123 11:53:55.688376 4689 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/4d98dbf3d356490497e06a0034257991fd138a92982cde21a0cd56d3e84df6cd/diff" to get inode usage: stat /var/lib/containers/storage/overlay/4d98dbf3d356490497e06a0034257991fd138a92982cde21a0cd56d3e84df6cd/diff: no such file or directory, extraDiskErr:
Jan 23 11:56:03 crc kubenswrapper[4689]: I0123 11:56:03.311485 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 11:56:03 crc kubenswrapper[4689]: I0123 11:56:03.312117 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.561876 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"]
Jan 23 11:56:16 crc kubenswrapper[4689]: E0123 11:56:16.562710 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="registry-server"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.562721 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="registry-server"
Jan 23 11:56:16 crc kubenswrapper[4689]: E0123 11:56:16.562733 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="extract-content"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.562739 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="extract-content"
Jan 23 11:56:16 crc kubenswrapper[4689]: E0123 11:56:16.562790 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="extract-utilities"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.562795 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="extract-utilities"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.562999 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="40d9c3cb-7898-42ab-b612-3f9327680ade" containerName="registry-server"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.564541 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.586164 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"]
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.675022 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xkpl\" (UniqueName: \"kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.675126 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.675391 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.778199 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xkpl\" (UniqueName: \"kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.778292 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.778436 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.778821 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.778845 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.804297 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xkpl\" (UniqueName: \"kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl\") pod \"certified-operators-t7nrf\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") " pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:16 crc kubenswrapper[4689]: I0123 11:56:16.892638 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:17 crc kubenswrapper[4689]: I0123 11:56:17.529914 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"]
Jan 23 11:56:18 crc kubenswrapper[4689]: I0123 11:56:18.307277 4689 generic.go:334] "Generic (PLEG): container finished" podID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerID="2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c" exitCode=0
Jan 23 11:56:18 crc kubenswrapper[4689]: I0123 11:56:18.307352 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerDied","Data":"2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c"}
Jan 23 11:56:18 crc kubenswrapper[4689]: I0123 11:56:18.307657 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerStarted","Data":"24d14f7f8b411b7f0719c78d58cbcf4d480c530f7fbe2a4a209ce15961631922"}
Jan 23 11:56:21 crc kubenswrapper[4689]: I0123 11:56:21.343513 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerStarted","Data":"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6"}
Jan 23 11:56:22 crc kubenswrapper[4689]: I0123 11:56:22.355817 4689 generic.go:334] "Generic (PLEG): container finished" podID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerID="5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6" exitCode=0
Jan 23 11:56:22 crc kubenswrapper[4689]: I0123 11:56:22.355873 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerDied","Data":"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6"}
Jan 23 11:56:23 crc kubenswrapper[4689]: I0123 11:56:23.371791 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerStarted","Data":"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655"}
Jan 23 11:56:23 crc kubenswrapper[4689]: I0123 11:56:23.403618 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t7nrf" podStartSLOduration=2.914310115 podStartE2EDuration="7.40359299s" podCreationTimestamp="2026-01-23 11:56:16 +0000 UTC" firstStartedPulling="2026-01-23 11:56:18.309545121 +0000 UTC m=+4042.934224980" lastFinishedPulling="2026-01-23 11:56:22.798827986 +0000 UTC m=+4047.423507855" observedRunningTime="2026-01-23 11:56:23.392692511 +0000 UTC m=+4048.017372360" watchObservedRunningTime="2026-01-23 11:56:23.40359299 +0000 UTC m=+4048.028272859"
Jan 23 11:56:26 crc kubenswrapper[4689]: I0123 11:56:26.893060 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:26 crc kubenswrapper[4689]: I0123 11:56:26.894719 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:26 crc kubenswrapper[4689]: I0123 11:56:26.959422 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:28 crc kubenswrapper[4689]: I0123 11:56:28.481109 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:28 crc kubenswrapper[4689]: I0123 11:56:28.548418 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"]
Jan 23 11:56:30 crc kubenswrapper[4689]: I0123 11:56:30.439420 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t7nrf" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="registry-server" containerID="cri-o://138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655" gracePeriod=2
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.204040 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t7nrf"
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.263590 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities\") pod \"bfd3e234-af88-49e5-a24e-0cdb3732de91\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") "
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.263848 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xkpl\" (UniqueName: \"kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl\") pod \"bfd3e234-af88-49e5-a24e-0cdb3732de91\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") "
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.263905 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content\") pod \"bfd3e234-af88-49e5-a24e-0cdb3732de91\" (UID: \"bfd3e234-af88-49e5-a24e-0cdb3732de91\") "
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.264485 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities" (OuterVolumeSpecName: "utilities") pod "bfd3e234-af88-49e5-a24e-0cdb3732de91" (UID: "bfd3e234-af88-49e5-a24e-0cdb3732de91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.269394 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl" (OuterVolumeSpecName: "kube-api-access-2xkpl") pod "bfd3e234-af88-49e5-a24e-0cdb3732de91" (UID: "bfd3e234-af88-49e5-a24e-0cdb3732de91"). InnerVolumeSpecName "kube-api-access-2xkpl".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.315727 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bfd3e234-af88-49e5-a24e-0cdb3732de91" (UID: "bfd3e234-af88-49e5-a24e-0cdb3732de91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.366752 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.366783 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xkpl\" (UniqueName: \"kubernetes.io/projected/bfd3e234-af88-49e5-a24e-0cdb3732de91-kube-api-access-2xkpl\") on node \"crc\" DevicePath \"\"" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.366794 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd3e234-af88-49e5-a24e-0cdb3732de91-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.453495 4689 generic.go:334] "Generic (PLEG): container finished" podID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerID="138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655" exitCode=0 Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.453552 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerDied","Data":"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655"} Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.453569 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t7nrf" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.453598 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t7nrf" event={"ID":"bfd3e234-af88-49e5-a24e-0cdb3732de91","Type":"ContainerDied","Data":"24d14f7f8b411b7f0719c78d58cbcf4d480c530f7fbe2a4a209ce15961631922"} Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.453648 4689 scope.go:117] "RemoveContainer" containerID="138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.504577 4689 scope.go:117] "RemoveContainer" containerID="5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.522694 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"] Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.534446 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t7nrf"] Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.543749 4689 scope.go:117] "RemoveContainer" containerID="2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.612707 4689 scope.go:117] "RemoveContainer" containerID="138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655" Jan 23 11:56:31 crc kubenswrapper[4689]: E0123 11:56:31.613281 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655\": container with ID starting with 138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655 not found: ID does not exist" containerID="138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.613322 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655"} err="failed to get container status \"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655\": rpc error: code = NotFound desc = could not find container \"138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655\": container with ID starting with 138c6436de190f162cfcdbce0432859069241e7ddceb7c35c338d228f5191655 not found: ID does not exist" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.613350 4689 scope.go:117] "RemoveContainer" containerID="5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6" Jan 23 11:56:31 crc kubenswrapper[4689]: E0123 11:56:31.613608 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6\": container with ID starting with 5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6 not found: ID does not exist" containerID="5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.613636 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6"} err="failed to get container status \"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6\": rpc error: code = NotFound desc = could not find 
container \"5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6\": container with ID starting with 5c07a244c3f06f2171cad8a5daf6fc66bd6a70c9d6d29ed0adb86e1417aa40a6 not found: ID does not exist" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.613650 4689 scope.go:117] "RemoveContainer" containerID="2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c" Jan 23 11:56:31 crc kubenswrapper[4689]: E0123 11:56:31.614289 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c\": container with ID starting with 2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c not found: ID does not exist" containerID="2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.614320 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c"} err="failed to get container status \"2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c\": rpc error: code = NotFound desc = could not find container \"2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c\": container with ID starting with 2efc8017e7f186511a2adb50ffbfc59caaea85e7784a0f38d2c797ca317b157c not found: ID does not exist" Jan 23 11:56:31 crc kubenswrapper[4689]: I0123 11:56:31.660162 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" path="/var/lib/kubelet/pods/bfd3e234-af88-49e5-a24e-0cdb3732de91/volumes" Jan 23 11:56:33 crc kubenswrapper[4689]: I0123 11:56:33.310707 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:56:33 crc kubenswrapper[4689]: I0123 11:56:33.312736 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.310580 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.313190 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.313364 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.314550 4689 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.314740 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1" gracePeriod=600 Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.862042 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1" exitCode=0 Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.862351 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1"} Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.862380 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1"} Jan 23 11:57:03 crc kubenswrapper[4689]: I0123 11:57:03.862394 4689 scope.go:117] "RemoveContainer" containerID="8c0dabf4db304e8cb6d64e5aadd9edbc84cefbe97ed356d22a587f35dac09948" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.586678 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:57:56 crc kubenswrapper[4689]: E0123 11:57:56.588019 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="extract-content" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.588043 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="extract-content" Jan 23 11:57:56 crc kubenswrapper[4689]: E0123 11:57:56.588097 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="registry-server" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.588110 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="registry-server" Jan 23 11:57:56 crc kubenswrapper[4689]: E0123 11:57:56.588190 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="extract-utilities" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.588213 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="extract-utilities" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.588669 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd3e234-af88-49e5-a24e-0cdb3732de91" containerName="registry-server" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.591233 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.601436 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.792189 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.792669 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.792903 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8qtx\" (UniqueName: \"kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.895726 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8qtx\" (UniqueName: \"kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.895991 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.896094 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.896581 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.896643 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.915644 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z8qtx\" (UniqueName: \"kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx\") pod \"redhat-operators-lppl5\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:56 crc kubenswrapper[4689]: I0123 11:57:56.949994 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:57:57 crc kubenswrapper[4689]: I0123 11:57:57.435753 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:57:57 crc kubenswrapper[4689]: I0123 11:57:57.572463 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerStarted","Data":"9687a56d3e26b382d477c655fe074b465a4dc0c641a28a5c6a8e5389a96846ef"} Jan 23 11:57:58 crc kubenswrapper[4689]: I0123 11:57:58.593821 4689 generic.go:334] "Generic (PLEG): container finished" podID="889bc7cd-6141-496c-a98b-36cce981ca99" containerID="719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986" exitCode=0 Jan 23 11:57:58 crc kubenswrapper[4689]: I0123 11:57:58.594067 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerDied","Data":"719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986"} Jan 23 11:57:58 crc kubenswrapper[4689]: I0123 11:57:58.597325 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 11:58:00 crc kubenswrapper[4689]: I0123 11:58:00.621932 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerStarted","Data":"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b"} Jan 23 11:58:04 crc kubenswrapper[4689]: I0123 11:58:04.671698 4689 generic.go:334] "Generic (PLEG): container finished" podID="889bc7cd-6141-496c-a98b-36cce981ca99" containerID="1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b" exitCode=0 Jan 23 11:58:04 crc kubenswrapper[4689]: I0123 11:58:04.671807 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerDied","Data":"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b"} Jan 23 11:58:05 crc kubenswrapper[4689]: I0123 11:58:05.690817 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerStarted","Data":"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48"} Jan 23 11:58:05 crc kubenswrapper[4689]: I0123 11:58:05.713372 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lppl5" podStartSLOduration=3.180031814 podStartE2EDuration="9.713354373s" podCreationTimestamp="2026-01-23 11:57:56 +0000 UTC" firstStartedPulling="2026-01-23 11:57:58.597044037 +0000 UTC m=+4143.221723896" lastFinishedPulling="2026-01-23 11:58:05.130366596 +0000 UTC m=+4149.755046455" observedRunningTime="2026-01-23 11:58:05.712535723 +0000 UTC m=+4150.337215602" watchObservedRunningTime="2026-01-23 11:58:05.713354373 +0000 UTC m=+4150.338034242" Jan 23 11:58:06 crc 
kubenswrapper[4689]: I0123 11:58:06.951029 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:06 crc kubenswrapper[4689]: I0123 11:58:06.951613 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:08 crc kubenswrapper[4689]: I0123 11:58:08.022141 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lppl5" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="registry-server" probeResult="failure" output=< Jan 23 11:58:08 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 11:58:08 crc kubenswrapper[4689]: > Jan 23 11:58:17 crc kubenswrapper[4689]: I0123 11:58:17.041756 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:17 crc kubenswrapper[4689]: I0123 11:58:17.119679 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:17 crc kubenswrapper[4689]: I0123 11:58:17.292619 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:58:18 crc kubenswrapper[4689]: I0123 11:58:18.853473 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lppl5" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="registry-server" containerID="cri-o://6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48" gracePeriod=2 Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.542714 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.566669 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities\") pod \"889bc7cd-6141-496c-a98b-36cce981ca99\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.567007 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content\") pod \"889bc7cd-6141-496c-a98b-36cce981ca99\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.567166 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8qtx\" (UniqueName: \"kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx\") pod \"889bc7cd-6141-496c-a98b-36cce981ca99\" (UID: \"889bc7cd-6141-496c-a98b-36cce981ca99\") " Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.573426 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities" (OuterVolumeSpecName: "utilities") pod "889bc7cd-6141-496c-a98b-36cce981ca99" (UID: "889bc7cd-6141-496c-a98b-36cce981ca99"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.591359 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx" (OuterVolumeSpecName: "kube-api-access-z8qtx") pod "889bc7cd-6141-496c-a98b-36cce981ca99" (UID: "889bc7cd-6141-496c-a98b-36cce981ca99"). InnerVolumeSpecName "kube-api-access-z8qtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.670191 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.670220 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8qtx\" (UniqueName: \"kubernetes.io/projected/889bc7cd-6141-496c-a98b-36cce981ca99-kube-api-access-z8qtx\") on node \"crc\" DevicePath \"\"" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.736334 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "889bc7cd-6141-496c-a98b-36cce981ca99" (UID: "889bc7cd-6141-496c-a98b-36cce981ca99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.771906 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889bc7cd-6141-496c-a98b-36cce981ca99-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.872987 4689 generic.go:334] "Generic (PLEG): container finished" podID="889bc7cd-6141-496c-a98b-36cce981ca99" containerID="6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48" exitCode=0 Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.873031 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerDied","Data":"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48"} Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.873016 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lppl5" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.873062 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lppl5" event={"ID":"889bc7cd-6141-496c-a98b-36cce981ca99","Type":"ContainerDied","Data":"9687a56d3e26b382d477c655fe074b465a4dc0c641a28a5c6a8e5389a96846ef"} Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.873076 4689 scope.go:117] "RemoveContainer" containerID="6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.935119 4689 scope.go:117] "RemoveContainer" containerID="1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b" Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.943994 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.957796 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lppl5"] Jan 23 11:58:19 crc kubenswrapper[4689]: I0123 11:58:19.967322 4689 scope.go:117] "RemoveContainer" containerID="719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.033266 4689 scope.go:117] "RemoveContainer" containerID="6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48" Jan 23 11:58:20 crc kubenswrapper[4689]: E0123 11:58:20.034029 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48\": container with ID starting with 6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48 not found: ID does not exist" containerID="6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.034073 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48"} err="failed to get container status \"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48\": rpc error: code = NotFound desc = could not find container \"6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48\": container with ID starting with 6d85d5ea42e18e6d7f7477ca068843a5689175d5278df1057bf057ecd79b4c48 not found: ID does not exist" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.034108 4689 scope.go:117] "RemoveContainer" containerID="1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b" Jan 23 11:58:20 crc kubenswrapper[4689]: E0123 11:58:20.034513 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b\": container with ID starting with 1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b not found: ID does not exist" containerID="1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.034588 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b"} err="failed to get container status \"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b\": rpc error: code = NotFound desc = could not find container 
\"1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b\": container with ID starting with 1e0f6edc5b51b3c3035db686d200acb8efe1b30558f9fb1a8e47510ff84cf30b not found: ID does not exist" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.034631 4689 scope.go:117] "RemoveContainer" containerID="719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986" Jan 23 11:58:20 crc kubenswrapper[4689]: E0123 11:58:20.035028 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986\": container with ID starting with 719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986 not found: ID does not exist" containerID="719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986" Jan 23 11:58:20 crc kubenswrapper[4689]: I0123 11:58:20.035059 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986"} err="failed to get container status \"719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986\": rpc error: code = NotFound desc = could not find container \"719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986\": container with ID starting with 719501842dabfac04eb285f53ae5a7624887efb2d3b6e0bb329ba9143bed3986 not found: ID does not exist" Jan 23 11:58:21 crc kubenswrapper[4689]: I0123 11:58:21.654408 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" path="/var/lib/kubelet/pods/889bc7cd-6141-496c-a98b-36cce981ca99/volumes" Jan 23 11:59:03 crc kubenswrapper[4689]: I0123 11:59:03.310949 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:59:03 crc kubenswrapper[4689]: I0123 11:59:03.311682 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 11:59:33 crc kubenswrapper[4689]: I0123 11:59:33.310690 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 11:59:33 crc kubenswrapper[4689]: I0123 11:59:33.311475 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.225809 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx"] Jan 23 12:00:00 crc kubenswrapper[4689]: E0123 12:00:00.226613 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" 
containerName="registry-server" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.226624 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="registry-server" Jan 23 12:00:00 crc kubenswrapper[4689]: E0123 12:00:00.226664 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="extract-content" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.226671 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="extract-content" Jan 23 12:00:00 crc kubenswrapper[4689]: E0123 12:00:00.226682 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="extract-utilities" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.226688 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="extract-utilities" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.226884 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="889bc7cd-6141-496c-a98b-36cce981ca99" containerName="registry-server" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.227640 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.230102 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.246872 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.251778 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx"] Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.299099 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.299707 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-488br\" (UniqueName: \"kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.299788 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.401194 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.401335 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.401437 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-488br\" (UniqueName: \"kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.403085 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.406617 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.420734 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-488br\" (UniqueName: \"kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br\") pod \"collect-profiles-29486160-rkhzx\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:00 crc kubenswrapper[4689]: I0123 12:00:00.546606 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:01 crc kubenswrapper[4689]: I0123 12:00:01.016396 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx"] Jan 23 12:00:01 crc kubenswrapper[4689]: W0123 12:00:01.041684 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58c5fdce_72e0_4c9f_8de4_78d73946b8b2.slice/crio-f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de WatchSource:0}: Error finding container f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de: Status 404 returned error can't find the container with id f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de Jan 23 12:00:01 crc kubenswrapper[4689]: I0123 12:00:01.353023 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" event={"ID":"58c5fdce-72e0-4c9f-8de4-78d73946b8b2","Type":"ContainerStarted","Data":"b0b37b4e1894b3b90c6eebe2e60f5b65a73fc702c073563decc24ea8f1553ac0"} Jan 23 12:00:01 crc kubenswrapper[4689]: I0123 12:00:01.353083 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" event={"ID":"58c5fdce-72e0-4c9f-8de4-78d73946b8b2","Type":"ContainerStarted","Data":"f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de"} Jan 23 12:00:01 crc kubenswrapper[4689]: I0123 12:00:01.370675 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" podStartSLOduration=1.370657872 podStartE2EDuration="1.370657872s" podCreationTimestamp="2026-01-23 12:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 12:00:01.369819982 +0000 UTC m=+4265.994499841" watchObservedRunningTime="2026-01-23 12:00:01.370657872 +0000 UTC m=+4265.995337731" Jan 23 12:00:02 crc kubenswrapper[4689]: I0123 12:00:02.369362 4689 generic.go:334] "Generic (PLEG): container finished" podID="58c5fdce-72e0-4c9f-8de4-78d73946b8b2" containerID="b0b37b4e1894b3b90c6eebe2e60f5b65a73fc702c073563decc24ea8f1553ac0" exitCode=0 Jan 23 12:00:02 crc kubenswrapper[4689]: I0123 12:00:02.369554 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" event={"ID":"58c5fdce-72e0-4c9f-8de4-78d73946b8b2","Type":"ContainerDied","Data":"b0b37b4e1894b3b90c6eebe2e60f5b65a73fc702c073563decc24ea8f1553ac0"} Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.310511 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.310785 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.310835 4689 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.311673 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.311741 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" gracePeriod=600 Jan 23 12:00:03 crc kubenswrapper[4689]: E0123 12:00:03.436198 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:00:03 crc kubenswrapper[4689]: I0123 12:00:03.820804 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.011615 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume\") pod \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.011658 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume\") pod \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.011940 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-488br\" (UniqueName: \"kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br\") pod \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\" (UID: \"58c5fdce-72e0-4c9f-8de4-78d73946b8b2\") " Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.020334 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "58c5fdce-72e0-4c9f-8de4-78d73946b8b2" (UID: "58c5fdce-72e0-4c9f-8de4-78d73946b8b2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.021526 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume" (OuterVolumeSpecName: "config-volume") pod "58c5fdce-72e0-4c9f-8de4-78d73946b8b2" (UID: "58c5fdce-72e0-4c9f-8de4-78d73946b8b2"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.029045 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br" (OuterVolumeSpecName: "kube-api-access-488br") pod "58c5fdce-72e0-4c9f-8de4-78d73946b8b2" (UID: "58c5fdce-72e0-4c9f-8de4-78d73946b8b2"). InnerVolumeSpecName "kube-api-access-488br". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.114827 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.115084 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.115095 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-488br\" (UniqueName: \"kubernetes.io/projected/58c5fdce-72e0-4c9f-8de4-78d73946b8b2-kube-api-access-488br\") on node \"crc\" DevicePath \"\"" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.391173 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1"} Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.391238 4689 scope.go:117] "RemoveContainer" containerID="f4d10285ac6014cc648261077f0c20be3dc8143f4a324757f06d68fef8b377d1" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.391258 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" exitCode=0 Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.392119 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:00:04 crc kubenswrapper[4689]: E0123 12:00:04.392632 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.393710 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" event={"ID":"58c5fdce-72e0-4c9f-8de4-78d73946b8b2","Type":"ContainerDied","Data":"f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de"} Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.393742 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7cdbad240f2ff2eedd80e70606e2c774903772612e57d7343149275049b98de" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.393768 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486160-rkhzx" Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.474079 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"] Jan 23 12:00:04 crc kubenswrapper[4689]: I0123 12:00:04.487852 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486115-6wbkn"] Jan 23 12:00:05 crc kubenswrapper[4689]: I0123 12:00:05.656819 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b302f4f-ab38-4efd-b4a7-917f5514cfd1" path="/var/lib/kubelet/pods/2b302f4f-ab38-4efd-b4a7-917f5514cfd1/volumes" Jan 23 12:00:16 crc kubenswrapper[4689]: I0123 12:00:16.640864 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:00:16 crc kubenswrapper[4689]: E0123 12:00:16.642054 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:00:27 crc kubenswrapper[4689]: I0123 12:00:27.646428 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:00:27 crc kubenswrapper[4689]: E0123 12:00:27.647061 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:00:30 crc kubenswrapper[4689]: I0123 12:00:30.324112 4689 scope.go:117] "RemoveContainer" containerID="77b144ba0f8bae66bb0cb0ab5d610d53876703b6b0daf45c926423cca7396597" Jan 23 12:00:41 crc kubenswrapper[4689]: I0123 12:00:41.640633 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:00:41 crc kubenswrapper[4689]: E0123 12:00:41.641764 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:00:55 crc kubenswrapper[4689]: I0123 12:00:55.649992 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:00:55 crc kubenswrapper[4689]: E0123 12:00:55.650921 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.156475 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29486161-msgk7"] Jan 23 12:01:00 crc kubenswrapper[4689]: E0123 12:01:00.157922 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58c5fdce-72e0-4c9f-8de4-78d73946b8b2" containerName="collect-profiles" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.157947 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="58c5fdce-72e0-4c9f-8de4-78d73946b8b2" containerName="collect-profiles" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.158518 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="58c5fdce-72e0-4c9f-8de4-78d73946b8b2" containerName="collect-profiles" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.159887 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.171511 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29486161-msgk7"] Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.289093 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.290212 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.290624 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vzkx\" (UniqueName: \"kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.290987 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.392968 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.393090 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle\") pod \"keystone-cron-29486161-msgk7\" (UID: 
\"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.393138 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.393237 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vzkx\" (UniqueName: \"kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.401657 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.402411 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.413798 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vzkx\" (UniqueName: \"kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.414088 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data\") pod \"keystone-cron-29486161-msgk7\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.498141 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:00 crc kubenswrapper[4689]: I0123 12:01:00.992342 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29486161-msgk7"] Jan 23 12:01:00 crc kubenswrapper[4689]: W0123 12:01:00.998253 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode812dcb0_5d25_4173_8ae5_1c736542d1d3.slice/crio-0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d WatchSource:0}: Error finding container 0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d: Status 404 returned error can't find the container with id 0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d Jan 23 12:01:01 crc kubenswrapper[4689]: I0123 12:01:01.106432 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29486161-msgk7" event={"ID":"e812dcb0-5d25-4173-8ae5-1c736542d1d3","Type":"ContainerStarted","Data":"0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d"} Jan 23 12:01:02 crc kubenswrapper[4689]: I0123 12:01:02.122882 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29486161-msgk7" event={"ID":"e812dcb0-5d25-4173-8ae5-1c736542d1d3","Type":"ContainerStarted","Data":"dedf2da322060cd536c0731f740dcbaa71ba2a7b91d0d9a58a52f889a62bbe2a"} Jan 23 12:01:02 crc kubenswrapper[4689]: I0123 12:01:02.150611 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29486161-msgk7" podStartSLOduration=2.150575884 podStartE2EDuration="2.150575884s" podCreationTimestamp="2026-01-23 12:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 12:01:02.144468743 +0000 UTC m=+4326.769148662" watchObservedRunningTime="2026-01-23 12:01:02.150575884 +0000 UTC m=+4326.775255783" Jan 23 12:01:04 crc kubenswrapper[4689]: I0123 12:01:04.160765 4689 generic.go:334] "Generic (PLEG): container finished" podID="e812dcb0-5d25-4173-8ae5-1c736542d1d3" containerID="dedf2da322060cd536c0731f740dcbaa71ba2a7b91d0d9a58a52f889a62bbe2a" exitCode=0 Jan 23 12:01:04 crc kubenswrapper[4689]: I0123 12:01:04.160869 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29486161-msgk7" event={"ID":"e812dcb0-5d25-4173-8ae5-1c736542d1d3","Type":"ContainerDied","Data":"dedf2da322060cd536c0731f740dcbaa71ba2a7b91d0d9a58a52f889a62bbe2a"} Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.613390 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.770061 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys\") pod \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.770274 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle\") pod \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.770403 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data\") pod \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.770517 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vzkx\" (UniqueName: \"kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx\") pod \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\" (UID: \"e812dcb0-5d25-4173-8ae5-1c736542d1d3\") " Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.775991 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e812dcb0-5d25-4173-8ae5-1c736542d1d3" (UID: "e812dcb0-5d25-4173-8ae5-1c736542d1d3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.777586 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx" (OuterVolumeSpecName: "kube-api-access-5vzkx") pod "e812dcb0-5d25-4173-8ae5-1c736542d1d3" (UID: "e812dcb0-5d25-4173-8ae5-1c736542d1d3"). InnerVolumeSpecName "kube-api-access-5vzkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.829860 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e812dcb0-5d25-4173-8ae5-1c736542d1d3" (UID: "e812dcb0-5d25-4173-8ae5-1c736542d1d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.869833 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data" (OuterVolumeSpecName: "config-data") pod "e812dcb0-5d25-4173-8ae5-1c736542d1d3" (UID: "e812dcb0-5d25-4173-8ae5-1c736542d1d3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.874601 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vzkx\" (UniqueName: \"kubernetes.io/projected/e812dcb0-5d25-4173-8ae5-1c736542d1d3-kube-api-access-5vzkx\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.874685 4689 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.874711 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:05 crc kubenswrapper[4689]: I0123 12:01:05.874728 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e812dcb0-5d25-4173-8ae5-1c736542d1d3-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:06 crc kubenswrapper[4689]: I0123 12:01:06.192474 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29486161-msgk7" event={"ID":"e812dcb0-5d25-4173-8ae5-1c736542d1d3","Type":"ContainerDied","Data":"0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d"} Jan 23 12:01:06 crc kubenswrapper[4689]: I0123 12:01:06.192539 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bc4096068603f8940a9b8242459070da4d342d3ac006befc016a4e2e964c30d" Jan 23 12:01:06 crc kubenswrapper[4689]: I0123 12:01:06.192606 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29486161-msgk7" Jan 23 12:01:06 crc kubenswrapper[4689]: I0123 12:01:06.640786 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:01:06 crc kubenswrapper[4689]: E0123 12:01:06.641862 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.442773 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:16 crc kubenswrapper[4689]: E0123 12:01:16.444021 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e812dcb0-5d25-4173-8ae5-1c736542d1d3" containerName="keystone-cron" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.444036 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e812dcb0-5d25-4173-8ae5-1c736542d1d3" containerName="keystone-cron" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.444321 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e812dcb0-5d25-4173-8ae5-1c736542d1d3" containerName="keystone-cron" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.446210 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.457853 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.562613 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.562724 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.562912 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58xsf\" (UniqueName: \"kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.665399 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.665914 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.667223 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58xsf\" (UniqueName: \"kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.667507 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.667837 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.693966 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-58xsf\" (UniqueName: \"kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf\") pod \"community-operators-h8q7m\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:16 crc kubenswrapper[4689]: I0123 12:01:16.775348 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:17 crc kubenswrapper[4689]: I0123 12:01:17.299866 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:17 crc kubenswrapper[4689]: I0123 12:01:17.333036 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerStarted","Data":"dad2c2ceba3e4f13cecf9c7191b905d02b5fd22b33fd1f3e0d1f2dd027468c08"} Jan 23 12:01:18 crc kubenswrapper[4689]: I0123 12:01:18.347504 4689 generic.go:334] "Generic (PLEG): container finished" podID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerID="a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98" exitCode=0 Jan 23 12:01:18 crc kubenswrapper[4689]: I0123 12:01:18.347594 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerDied","Data":"a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98"} Jan 23 12:01:19 crc kubenswrapper[4689]: I0123 12:01:19.360602 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerStarted","Data":"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540"} Jan 23 12:01:19 crc kubenswrapper[4689]: I0123 12:01:19.640745 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:01:19 crc kubenswrapper[4689]: E0123 12:01:19.641390 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:01:21 crc kubenswrapper[4689]: I0123 12:01:21.383035 4689 generic.go:334] "Generic (PLEG): container finished" podID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerID="9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540" exitCode=0 Jan 23 12:01:21 crc kubenswrapper[4689]: I0123 12:01:21.383117 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerDied","Data":"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540"} Jan 23 12:01:22 crc kubenswrapper[4689]: I0123 12:01:22.394707 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerStarted","Data":"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5"} Jan 23 12:01:22 crc kubenswrapper[4689]: I0123 12:01:22.422356 4689 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h8q7m" podStartSLOduration=2.895797405 podStartE2EDuration="6.422327374s" podCreationTimestamp="2026-01-23 12:01:16 +0000 UTC" firstStartedPulling="2026-01-23 12:01:18.350379965 +0000 UTC m=+4342.975059854" lastFinishedPulling="2026-01-23 12:01:21.876909924 +0000 UTC m=+4346.501589823" observedRunningTime="2026-01-23 12:01:22.417174568 +0000 UTC m=+4347.041854427" watchObservedRunningTime="2026-01-23 12:01:22.422327374 +0000 UTC m=+4347.047007273" Jan 23 12:01:26 crc kubenswrapper[4689]: I0123 12:01:26.776531 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:26 crc kubenswrapper[4689]: I0123 12:01:26.777192 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:26 crc kubenswrapper[4689]: I0123 12:01:26.831387 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:27 crc kubenswrapper[4689]: I0123 12:01:27.543175 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:27 crc kubenswrapper[4689]: I0123 12:01:27.628359 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:29 crc kubenswrapper[4689]: I0123 12:01:29.499279 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h8q7m" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="registry-server" containerID="cri-o://5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5" gracePeriod=2 Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.097030 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.242932 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58xsf\" (UniqueName: \"kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf\") pod \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.243091 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities\") pod \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.243129 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content\") pod \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\" (UID: \"551dc9ac-0ae2-4bb6-bed0-dc2895d923da\") " Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.243931 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities" (OuterVolumeSpecName: "utilities") pod "551dc9ac-0ae2-4bb6-bed0-dc2895d923da" (UID: "551dc9ac-0ae2-4bb6-bed0-dc2895d923da"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.248657 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf" (OuterVolumeSpecName: "kube-api-access-58xsf") pod "551dc9ac-0ae2-4bb6-bed0-dc2895d923da" (UID: "551dc9ac-0ae2-4bb6-bed0-dc2895d923da"). InnerVolumeSpecName "kube-api-access-58xsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.299363 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "551dc9ac-0ae2-4bb6-bed0-dc2895d923da" (UID: "551dc9ac-0ae2-4bb6-bed0-dc2895d923da"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.345721 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58xsf\" (UniqueName: \"kubernetes.io/projected/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-kube-api-access-58xsf\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.345754 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.345767 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/551dc9ac-0ae2-4bb6-bed0-dc2895d923da-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.544353 4689 generic.go:334] "Generic (PLEG): container finished" podID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerID="5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5" exitCode=0 Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.544781 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h8q7m" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.544805 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerDied","Data":"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5"} Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.546002 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8q7m" event={"ID":"551dc9ac-0ae2-4bb6-bed0-dc2895d923da","Type":"ContainerDied","Data":"dad2c2ceba3e4f13cecf9c7191b905d02b5fd22b33fd1f3e0d1f2dd027468c08"} Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.546041 4689 scope.go:117] "RemoveContainer" containerID="5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5" Jan 23 12:01:30 crc kubenswrapper[4689]: E0123 12:01:30.547409 4689 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/community-operators-h8q7m_openshift-marketplace_registry-server-5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5.log: no such file or directory" path="/var/log/containers/community-operators-h8q7m_openshift-marketplace_registry-server-5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5.log" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.601006 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.615376 4689 scope.go:117] "RemoveContainer" containerID="9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.620561 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h8q7m"] Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.685283 4689 scope.go:117] "RemoveContainer" containerID="a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.766453 4689 scope.go:117] "RemoveContainer" containerID="5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5" Jan 23 12:01:30 crc kubenswrapper[4689]: E0123 12:01:30.775291 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5\": container with ID starting with 5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5 not found: ID does not exist" containerID="5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.775544 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5"} err="failed to get container status \"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5\": rpc error: code = NotFound desc = could not find container \"5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5\": container with ID starting with 5de40a66c1fd4babb718527e1f16934851f04cc189fadb971cc68faef29cd6a5 not found: ID does not exist" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.775630 4689 scope.go:117] "RemoveContainer" containerID="9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540" Jan 23 12:01:30 crc kubenswrapper[4689]: 
E0123 12:01:30.782630 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540\": container with ID starting with 9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540 not found: ID does not exist" containerID="9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.782851 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540"} err="failed to get container status \"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540\": rpc error: code = NotFound desc = could not find container \"9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540\": container with ID starting with 9211789ca4cbb31976356a20bfc5f79577534c892c9a2e3b82e2dcefa59eb540 not found: ID does not exist" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.782932 4689 scope.go:117] "RemoveContainer" containerID="a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98" Jan 23 12:01:30 crc kubenswrapper[4689]: E0123 12:01:30.787479 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98\": container with ID starting with a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98 not found: ID does not exist" containerID="a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98" Jan 23 12:01:30 crc kubenswrapper[4689]: I0123 12:01:30.787532 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98"} err="failed to get container status \"a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98\": rpc error: code = NotFound desc = could not find container \"a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98\": container with ID starting with a0f381bf44c923ba2ed0126ccceb31b47bc2f44564b4b1d5463c56fba8e04c98 not found: ID does not exist" Jan 23 12:01:31 crc kubenswrapper[4689]: I0123 12:01:31.640746 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:01:31 crc kubenswrapper[4689]: E0123 12:01:31.642478 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:01:31 crc kubenswrapper[4689]: I0123 12:01:31.657412 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" path="/var/lib/kubelet/pods/551dc9ac-0ae2-4bb6-bed0-dc2895d923da/volumes" Jan 23 12:01:42 crc kubenswrapper[4689]: I0123 12:01:42.640447 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:01:42 crc kubenswrapper[4689]: E0123 12:01:42.641378 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:01:55 crc kubenswrapper[4689]: I0123 12:01:55.647976 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:01:55 crc kubenswrapper[4689]: E0123 12:01:55.648860 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:02:08 crc kubenswrapper[4689]: I0123 12:02:08.640760 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:02:08 crc kubenswrapper[4689]: E0123 12:02:08.641809 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:02:23 crc kubenswrapper[4689]: I0123 12:02:23.639916 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:02:23 crc kubenswrapper[4689]: E0123 12:02:23.640632 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:02:38 crc kubenswrapper[4689]: I0123 12:02:38.639921 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:02:38 crc kubenswrapper[4689]: E0123 12:02:38.640676 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:02:51 crc kubenswrapper[4689]: I0123 12:02:51.644084 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:02:51 crc kubenswrapper[4689]: E0123 12:02:51.644887 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:03:05 crc kubenswrapper[4689]: I0123 12:03:05.648863 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:03:05 crc kubenswrapper[4689]: E0123 12:03:05.649651 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:03:17 crc kubenswrapper[4689]: I0123 12:03:17.641295 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:03:17 crc kubenswrapper[4689]: E0123 12:03:17.642493 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:03:29 crc kubenswrapper[4689]: I0123 12:03:29.640869 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:03:29 crc kubenswrapper[4689]: E0123 12:03:29.641773 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:03:44 crc kubenswrapper[4689]: I0123 12:03:44.641026 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:03:44 crc kubenswrapper[4689]: E0123 12:03:44.642596 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:03:55 crc kubenswrapper[4689]: I0123 12:03:55.648888 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:03:55 crc kubenswrapper[4689]: E0123 12:03:55.649825 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:04:10 crc kubenswrapper[4689]: I0123 12:04:10.640145 4689 
scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:04:10 crc kubenswrapper[4689]: E0123 12:04:10.641171 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.532112 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:11 crc kubenswrapper[4689]: E0123 12:04:11.533410 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="registry-server" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.533442 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="registry-server" Jan 23 12:04:11 crc kubenswrapper[4689]: E0123 12:04:11.533481 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="extract-utilities" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.533496 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="extract-utilities" Jan 23 12:04:11 crc kubenswrapper[4689]: E0123 12:04:11.533527 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="extract-content" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.533541 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="extract-content" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.534029 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="551dc9ac-0ae2-4bb6-bed0-dc2895d923da" containerName="registry-server" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.538741 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.552065 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.704540 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz9k6\" (UniqueName: \"kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.705688 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.705878 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.808846 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz9k6\" (UniqueName: \"kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.809525 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.809729 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.811007 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.811047 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.848673 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mz9k6\" (UniqueName: \"kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6\") pod \"redhat-marketplace-zklvr\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:11 crc kubenswrapper[4689]: I0123 12:04:11.877618 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:12 crc kubenswrapper[4689]: I0123 12:04:12.502924 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:13 crc kubenswrapper[4689]: I0123 12:04:13.505619 4689 generic.go:334] "Generic (PLEG): container finished" podID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerID="9f7a0688f012c0a1ccb1df2b73453f9c6ffb3e7a14efc92a172b32dbbae83aa7" exitCode=0 Jan 23 12:04:13 crc kubenswrapper[4689]: I0123 12:04:13.505795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerDied","Data":"9f7a0688f012c0a1ccb1df2b73453f9c6ffb3e7a14efc92a172b32dbbae83aa7"} Jan 23 12:04:13 crc kubenswrapper[4689]: I0123 12:04:13.506795 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerStarted","Data":"6e06db85f2fea40c535d867f2fa02dcc9316cd71ccd1883896cb0708b78ad1c9"} Jan 23 12:04:13 crc kubenswrapper[4689]: I0123 12:04:13.510670 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 12:04:14 crc kubenswrapper[4689]: I0123 12:04:14.524087 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerStarted","Data":"6be44619b3623b0b63a4173d9c6bc04b3d7d918b6ea42619f66f6492c05c099d"} Jan 23 12:04:15 crc kubenswrapper[4689]: I0123 12:04:15.536641 4689 generic.go:334] "Generic (PLEG): container finished" podID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerID="6be44619b3623b0b63a4173d9c6bc04b3d7d918b6ea42619f66f6492c05c099d" exitCode=0 Jan 23 12:04:15 crc kubenswrapper[4689]: I0123 12:04:15.536685 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerDied","Data":"6be44619b3623b0b63a4173d9c6bc04b3d7d918b6ea42619f66f6492c05c099d"} Jan 23 12:04:16 crc kubenswrapper[4689]: I0123 12:04:16.557488 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerStarted","Data":"872892e7838c702d196d694cecb4b277bd547ac8c1cd360d2d87cdf315d35e61"} Jan 23 12:04:16 crc kubenswrapper[4689]: I0123 12:04:16.589310 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zklvr" podStartSLOduration=3.076166496 podStartE2EDuration="5.589279493s" podCreationTimestamp="2026-01-23 12:04:11 +0000 UTC" firstStartedPulling="2026-01-23 12:04:13.510378154 +0000 UTC m=+4518.135058013" lastFinishedPulling="2026-01-23 12:04:16.023491131 +0000 UTC m=+4520.648171010" observedRunningTime="2026-01-23 12:04:16.583525444 +0000 UTC m=+4521.208205323" watchObservedRunningTime="2026-01-23 12:04:16.589279493 +0000 UTC 
m=+4521.213959382" Jan 23 12:04:21 crc kubenswrapper[4689]: I0123 12:04:21.878629 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:21 crc kubenswrapper[4689]: I0123 12:04:21.879462 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:21 crc kubenswrapper[4689]: I0123 12:04:21.946911 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:22 crc kubenswrapper[4689]: I0123 12:04:22.640511 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:04:22 crc kubenswrapper[4689]: E0123 12:04:22.640926 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:04:22 crc kubenswrapper[4689]: I0123 12:04:22.707894 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:25 crc kubenswrapper[4689]: I0123 12:04:25.476697 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:25 crc kubenswrapper[4689]: I0123 12:04:25.477438 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zklvr" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="registry-server" containerID="cri-o://872892e7838c702d196d694cecb4b277bd547ac8c1cd360d2d87cdf315d35e61" gracePeriod=2 Jan 23 12:04:25 crc kubenswrapper[4689]: I0123 12:04:25.669046 4689 generic.go:334] "Generic (PLEG): container finished" podID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerID="872892e7838c702d196d694cecb4b277bd547ac8c1cd360d2d87cdf315d35e61" exitCode=0 Jan 23 12:04:25 crc kubenswrapper[4689]: I0123 12:04:25.669141 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerDied","Data":"872892e7838c702d196d694cecb4b277bd547ac8c1cd360d2d87cdf315d35e61"} Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.021597 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.135124 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities\") pod \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.135306 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz9k6\" (UniqueName: \"kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6\") pod \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.135337 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content\") pod \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\" (UID: \"b818d30e-5d38-4b4a-b0bc-e26431f2a17c\") " Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.136623 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities" (OuterVolumeSpecName: "utilities") pod "b818d30e-5d38-4b4a-b0bc-e26431f2a17c" (UID: "b818d30e-5d38-4b4a-b0bc-e26431f2a17c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.142446 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6" (OuterVolumeSpecName: "kube-api-access-mz9k6") pod "b818d30e-5d38-4b4a-b0bc-e26431f2a17c" (UID: "b818d30e-5d38-4b4a-b0bc-e26431f2a17c"). InnerVolumeSpecName "kube-api-access-mz9k6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.168290 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b818d30e-5d38-4b4a-b0bc-e26431f2a17c" (UID: "b818d30e-5d38-4b4a-b0bc-e26431f2a17c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.238763 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.238806 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz9k6\" (UniqueName: \"kubernetes.io/projected/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-kube-api-access-mz9k6\") on node \"crc\" DevicePath \"\"" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.238820 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b818d30e-5d38-4b4a-b0bc-e26431f2a17c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.688130 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zklvr" event={"ID":"b818d30e-5d38-4b4a-b0bc-e26431f2a17c","Type":"ContainerDied","Data":"6e06db85f2fea40c535d867f2fa02dcc9316cd71ccd1883896cb0708b78ad1c9"} Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.688270 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zklvr" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.688665 4689 scope.go:117] "RemoveContainer" containerID="872892e7838c702d196d694cecb4b277bd547ac8c1cd360d2d87cdf315d35e61" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.749251 4689 scope.go:117] "RemoveContainer" containerID="6be44619b3623b0b63a4173d9c6bc04b3d7d918b6ea42619f66f6492c05c099d" Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.760480 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.789502 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zklvr"] Jan 23 12:04:26 crc kubenswrapper[4689]: I0123 12:04:26.804687 4689 scope.go:117] "RemoveContainer" containerID="9f7a0688f012c0a1ccb1df2b73453f9c6ffb3e7a14efc92a172b32dbbae83aa7" Jan 23 12:04:27 crc kubenswrapper[4689]: I0123 12:04:27.659988 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" path="/var/lib/kubelet/pods/b818d30e-5d38-4b4a-b0bc-e26431f2a17c/volumes" Jan 23 12:04:34 crc kubenswrapper[4689]: I0123 12:04:34.640417 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:04:34 crc kubenswrapper[4689]: E0123 12:04:34.641294 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:04:48 crc kubenswrapper[4689]: I0123 12:04:48.642209 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:04:48 crc kubenswrapper[4689]: E0123 12:04:48.644394 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:05:03 crc kubenswrapper[4689]: I0123 12:05:03.641163 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:05:04 crc kubenswrapper[4689]: I0123 12:05:04.183063 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a"} Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.874318 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:20 crc kubenswrapper[4689]: E0123 12:06:20.875561 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="extract-content" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.875578 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="extract-content" Jan 23 12:06:20 crc kubenswrapper[4689]: E0123 12:06:20.875609 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="registry-server" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.875619 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="registry-server" Jan 23 12:06:20 crc kubenswrapper[4689]: E0123 12:06:20.875652 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="extract-utilities" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.875661 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="extract-utilities" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.875987 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b818d30e-5d38-4b4a-b0bc-e26431f2a17c" containerName="registry-server" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.878573 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:20 crc kubenswrapper[4689]: I0123 12:06:20.889187 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.037094 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfpt5\" (UniqueName: \"kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.037844 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.037968 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.140035 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfpt5\" (UniqueName: \"kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.140171 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.140248 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.140866 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.140992 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.163572 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gfpt5\" (UniqueName: \"kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5\") pod \"certified-operators-2bm9l\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.222458 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:21 crc kubenswrapper[4689]: I0123 12:06:21.736271 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:22 crc kubenswrapper[4689]: I0123 12:06:22.133435 4689 generic.go:334] "Generic (PLEG): container finished" podID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerID="49511488ae79290fd7c7bf1c1b066b7144707bae93e52c117849446a31a2e982" exitCode=0 Jan 23 12:06:22 crc kubenswrapper[4689]: I0123 12:06:22.133476 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerDied","Data":"49511488ae79290fd7c7bf1c1b066b7144707bae93e52c117849446a31a2e982"} Jan 23 12:06:22 crc kubenswrapper[4689]: I0123 12:06:22.133514 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerStarted","Data":"e4eef1ecc09cfd11b40f48e12b54e202c10b2fc9a6d9ba99ebf1bcba902705c4"} Jan 23 12:06:23 crc kubenswrapper[4689]: I0123 12:06:23.150272 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerStarted","Data":"e25f531e28ef1ac692234b09415761e6a7d1af331fbbd04d7b1de59b0dae0d7f"} Jan 23 12:06:24 crc kubenswrapper[4689]: I0123 12:06:24.160894 4689 generic.go:334] "Generic (PLEG): container finished" podID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerID="e25f531e28ef1ac692234b09415761e6a7d1af331fbbd04d7b1de59b0dae0d7f" exitCode=0 Jan 23 12:06:24 crc kubenswrapper[4689]: I0123 12:06:24.161003 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerDied","Data":"e25f531e28ef1ac692234b09415761e6a7d1af331fbbd04d7b1de59b0dae0d7f"} Jan 23 12:06:25 crc kubenswrapper[4689]: I0123 12:06:25.173734 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerStarted","Data":"16c96ba1b01194f5ab2f103ee56c2003c3a85b8de8682ce44cde38ef92ec45c6"} Jan 23 12:06:25 crc kubenswrapper[4689]: I0123 12:06:25.195952 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2bm9l" podStartSLOduration=2.6730637059999998 podStartE2EDuration="5.195928214s" podCreationTimestamp="2026-01-23 12:06:20 +0000 UTC" firstStartedPulling="2026-01-23 12:06:22.135901761 +0000 UTC m=+4646.760581620" lastFinishedPulling="2026-01-23 12:06:24.658766269 +0000 UTC m=+4649.283446128" observedRunningTime="2026-01-23 12:06:25.190818599 +0000 UTC m=+4649.815498468" watchObservedRunningTime="2026-01-23 12:06:25.195928214 +0000 UTC m=+4649.820608073" Jan 23 12:06:31 crc kubenswrapper[4689]: I0123 12:06:31.223695 4689 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:31 crc kubenswrapper[4689]: I0123 12:06:31.224183 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:31 crc kubenswrapper[4689]: I0123 12:06:31.278737 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:31 crc kubenswrapper[4689]: I0123 12:06:31.355961 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:33 crc kubenswrapper[4689]: I0123 12:06:33.748100 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:33 crc kubenswrapper[4689]: I0123 12:06:33.749354 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2bm9l" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="registry-server" containerID="cri-o://16c96ba1b01194f5ab2f103ee56c2003c3a85b8de8682ce44cde38ef92ec45c6" gracePeriod=2 Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.272578 4689 generic.go:334] "Generic (PLEG): container finished" podID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerID="16c96ba1b01194f5ab2f103ee56c2003c3a85b8de8682ce44cde38ef92ec45c6" exitCode=0 Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.272651 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerDied","Data":"16c96ba1b01194f5ab2f103ee56c2003c3a85b8de8682ce44cde38ef92ec45c6"} Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.272955 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2bm9l" event={"ID":"1ed317eb-18d7-4665-912e-b485f42d72ac","Type":"ContainerDied","Data":"e4eef1ecc09cfd11b40f48e12b54e202c10b2fc9a6d9ba99ebf1bcba902705c4"} Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.272977 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4eef1ecc09cfd11b40f48e12b54e202c10b2fc9a6d9ba99ebf1bcba902705c4" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.328690 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.503308 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content\") pod \"1ed317eb-18d7-4665-912e-b485f42d72ac\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.503377 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities\") pod \"1ed317eb-18d7-4665-912e-b485f42d72ac\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.503424 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfpt5\" (UniqueName: \"kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5\") pod \"1ed317eb-18d7-4665-912e-b485f42d72ac\" (UID: \"1ed317eb-18d7-4665-912e-b485f42d72ac\") " Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.504108 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities" (OuterVolumeSpecName: "utilities") pod "1ed317eb-18d7-4665-912e-b485f42d72ac" (UID: "1ed317eb-18d7-4665-912e-b485f42d72ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.504465 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.511340 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5" (OuterVolumeSpecName: "kube-api-access-gfpt5") pod "1ed317eb-18d7-4665-912e-b485f42d72ac" (UID: "1ed317eb-18d7-4665-912e-b485f42d72ac"). InnerVolumeSpecName "kube-api-access-gfpt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.570935 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ed317eb-18d7-4665-912e-b485f42d72ac" (UID: "1ed317eb-18d7-4665-912e-b485f42d72ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.606744 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ed317eb-18d7-4665-912e-b485f42d72ac-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:06:34 crc kubenswrapper[4689]: I0123 12:06:34.606781 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfpt5\" (UniqueName: \"kubernetes.io/projected/1ed317eb-18d7-4665-912e-b485f42d72ac-kube-api-access-gfpt5\") on node \"crc\" DevicePath \"\"" Jan 23 12:06:35 crc kubenswrapper[4689]: I0123 12:06:35.286254 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2bm9l" Jan 23 12:06:35 crc kubenswrapper[4689]: I0123 12:06:35.342665 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:35 crc kubenswrapper[4689]: I0123 12:06:35.357214 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2bm9l"] Jan 23 12:06:35 crc kubenswrapper[4689]: I0123 12:06:35.673899 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" path="/var/lib/kubelet/pods/1ed317eb-18d7-4665-912e-b485f42d72ac/volumes" Jan 23 12:07:03 crc kubenswrapper[4689]: I0123 12:07:03.311209 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:07:03 crc kubenswrapper[4689]: I0123 12:07:03.311934 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:07:33 crc kubenswrapper[4689]: I0123 12:07:33.311315 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:07:33 crc kubenswrapper[4689]: I0123 12:07:33.311924 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:08:00 crc kubenswrapper[4689]: I0123 12:08:00.985227 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:00 crc kubenswrapper[4689]: E0123 12:08:00.986311 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="registry-server" Jan 23 12:08:00 crc kubenswrapper[4689]: I0123 12:08:00.986330 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="registry-server" Jan 23 12:08:00 crc kubenswrapper[4689]: E0123 12:08:00.986359 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="extract-utilities" Jan 23 12:08:00 crc kubenswrapper[4689]: I0123 12:08:00.986367 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="extract-utilities" Jan 23 12:08:00 crc kubenswrapper[4689]: E0123 12:08:00.986397 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="extract-content" Jan 23 12:08:00 crc kubenswrapper[4689]: I0123 12:08:00.986405 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="extract-content" Jan 23 12:08:00 crc 
kubenswrapper[4689]: I0123 12:08:00.986698 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ed317eb-18d7-4665-912e-b485f42d72ac" containerName="registry-server" Jan 23 12:08:00 crc kubenswrapper[4689]: I0123 12:08:00.988929 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.002784 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.052352 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.052822 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.053139 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hntj5\" (UniqueName: \"kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.154989 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hntj5\" (UniqueName: \"kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.155121 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.155316 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.155594 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.155831 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.173822 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hntj5\" (UniqueName: \"kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5\") pod \"redhat-operators-bfvpq\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.318554 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:01 crc kubenswrapper[4689]: I0123 12:08:01.836137 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:02 crc kubenswrapper[4689]: I0123 12:08:02.343584 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerStarted","Data":"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d"} Jan 23 12:08:02 crc kubenswrapper[4689]: I0123 12:08:02.343630 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerStarted","Data":"bb8a3d9aeba1cceed42404e87bde441a25bad1432a0c89894ecd722c4a55ae5a"} Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.310835 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.311087 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.311133 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.312326 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.312389 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a" gracePeriod=600 Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.361322 4689 generic.go:334] "Generic (PLEG): container finished" podID="ed1a7018-1083-4a78-84e2-40281d48efe3" 
containerID="9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d" exitCode=0 Jan 23 12:08:03 crc kubenswrapper[4689]: I0123 12:08:03.361371 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerDied","Data":"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d"} Jan 23 12:08:04 crc kubenswrapper[4689]: I0123 12:08:04.374506 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a" exitCode=0 Jan 23 12:08:04 crc kubenswrapper[4689]: I0123 12:08:04.374556 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a"} Jan 23 12:08:04 crc kubenswrapper[4689]: I0123 12:08:04.375136 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51"} Jan 23 12:08:04 crc kubenswrapper[4689]: I0123 12:08:04.375180 4689 scope.go:117] "RemoveContainer" containerID="ecb599f294792be14ee00213934812d0350cc7a4efb8b633f4ff38e87de693a1" Jan 23 12:08:05 crc kubenswrapper[4689]: I0123 12:08:05.390813 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerStarted","Data":"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4"} Jan 23 12:08:11 crc kubenswrapper[4689]: I0123 12:08:11.490381 4689 generic.go:334] "Generic (PLEG): container finished" podID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerID="7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4" exitCode=0 Jan 23 12:08:11 crc kubenswrapper[4689]: I0123 12:08:11.490443 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerDied","Data":"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4"} Jan 23 12:08:12 crc kubenswrapper[4689]: I0123 12:08:12.503275 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerStarted","Data":"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4"} Jan 23 12:08:12 crc kubenswrapper[4689]: I0123 12:08:12.534670 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bfvpq" podStartSLOduration=3.789570126 podStartE2EDuration="12.534645938s" podCreationTimestamp="2026-01-23 12:08:00 +0000 UTC" firstStartedPulling="2026-01-23 12:08:03.364859376 +0000 UTC m=+4747.989539275" lastFinishedPulling="2026-01-23 12:08:12.109935228 +0000 UTC m=+4756.734615087" observedRunningTime="2026-01-23 12:08:12.520567174 +0000 UTC m=+4757.145247023" watchObservedRunningTime="2026-01-23 12:08:12.534645938 +0000 UTC m=+4757.159325797" Jan 23 12:08:21 crc kubenswrapper[4689]: I0123 12:08:21.318839 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bfvpq" 
Jan 23 12:08:21 crc kubenswrapper[4689]: I0123 12:08:21.319431 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:22 crc kubenswrapper[4689]: I0123 12:08:22.403537 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bfvpq" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="registry-server" probeResult="failure" output=< Jan 23 12:08:22 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:08:22 crc kubenswrapper[4689]: > Jan 23 12:08:31 crc kubenswrapper[4689]: I0123 12:08:31.383744 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:31 crc kubenswrapper[4689]: I0123 12:08:31.451721 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:32 crc kubenswrapper[4689]: I0123 12:08:32.188589 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:32 crc kubenswrapper[4689]: I0123 12:08:32.743497 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bfvpq" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="registry-server" containerID="cri-o://b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4" gracePeriod=2 Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.699337 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.760338 4689 generic.go:334] "Generic (PLEG): container finished" podID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerID="b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4" exitCode=0 Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.760391 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerDied","Data":"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4"} Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.760421 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfvpq" event={"ID":"ed1a7018-1083-4a78-84e2-40281d48efe3","Type":"ContainerDied","Data":"bb8a3d9aeba1cceed42404e87bde441a25bad1432a0c89894ecd722c4a55ae5a"} Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.760441 4689 scope.go:117] "RemoveContainer" containerID="b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.760594 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bfvpq" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.785685 4689 scope.go:117] "RemoveContainer" containerID="7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.804501 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content\") pod \"ed1a7018-1083-4a78-84e2-40281d48efe3\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.804575 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities\") pod \"ed1a7018-1083-4a78-84e2-40281d48efe3\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.804703 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hntj5\" (UniqueName: \"kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5\") pod \"ed1a7018-1083-4a78-84e2-40281d48efe3\" (UID: \"ed1a7018-1083-4a78-84e2-40281d48efe3\") " Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.809355 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities" (OuterVolumeSpecName: "utilities") pod "ed1a7018-1083-4a78-84e2-40281d48efe3" (UID: "ed1a7018-1083-4a78-84e2-40281d48efe3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.815129 4689 scope.go:117] "RemoveContainer" containerID="9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.845776 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5" (OuterVolumeSpecName: "kube-api-access-hntj5") pod "ed1a7018-1083-4a78-84e2-40281d48efe3" (UID: "ed1a7018-1083-4a78-84e2-40281d48efe3"). InnerVolumeSpecName "kube-api-access-hntj5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.908204 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.908238 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hntj5\" (UniqueName: \"kubernetes.io/projected/ed1a7018-1083-4a78-84e2-40281d48efe3-kube-api-access-hntj5\") on node \"crc\" DevicePath \"\"" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.918465 4689 scope.go:117] "RemoveContainer" containerID="b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4" Jan 23 12:08:33 crc kubenswrapper[4689]: E0123 12:08:33.918903 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4\": container with ID starting with b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4 not found: ID does not exist" containerID="b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.918939 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4"} err="failed to get container status \"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4\": rpc error: code = NotFound desc = could not find container \"b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4\": container with ID starting with b370bd61b020a1f6ab1b3a6fdfaa30f26c3cd15e7555913cde4940e998f351a4 not found: ID does not exist" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.918965 4689 scope.go:117] "RemoveContainer" containerID="7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4" Jan 23 12:08:33 crc kubenswrapper[4689]: E0123 12:08:33.919382 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4\": container with ID starting with 7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4 not found: ID does not exist" containerID="7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.919409 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4"} err="failed to get container status \"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4\": rpc error: code = NotFound desc = could not find container \"7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4\": container with ID starting with 7863952f8198c63624ab836a92beec82f53dcc9c0ea082832548dc09a6a057c4 not found: ID does not exist" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.919427 4689 scope.go:117] "RemoveContainer" containerID="9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d" Jan 23 12:08:33 crc kubenswrapper[4689]: E0123 12:08:33.919945 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d\": container with ID starting with 
9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d not found: ID does not exist" containerID="9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.919980 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d"} err="failed to get container status \"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d\": rpc error: code = NotFound desc = could not find container \"9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d\": container with ID starting with 9fbd0b52678cf5a9ba4f62cdfe78e88b1183f8a3bf38e43a21992b3f959f512d not found: ID does not exist" Jan 23 12:08:33 crc kubenswrapper[4689]: I0123 12:08:33.939209 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed1a7018-1083-4a78-84e2-40281d48efe3" (UID: "ed1a7018-1083-4a78-84e2-40281d48efe3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:08:34 crc kubenswrapper[4689]: I0123 12:08:34.010763 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed1a7018-1083-4a78-84e2-40281d48efe3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:08:34 crc kubenswrapper[4689]: I0123 12:08:34.104859 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:34 crc kubenswrapper[4689]: I0123 12:08:34.116193 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bfvpq"] Jan 23 12:08:35 crc kubenswrapper[4689]: I0123 12:08:35.665316 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" path="/var/lib/kubelet/pods/ed1a7018-1083-4a78-84e2-40281d48efe3/volumes" Jan 23 12:10:03 crc kubenswrapper[4689]: I0123 12:10:03.310665 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:10:03 crc kubenswrapper[4689]: I0123 12:10:03.311143 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.194550 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 23 12:10:11 crc kubenswrapper[4689]: E0123 12:10:11.195525 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="extract-utilities" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.195538 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="extract-utilities" Jan 23 12:10:11 crc kubenswrapper[4689]: E0123 12:10:11.195552 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" 
containerName="extract-content" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.195560 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="extract-content" Jan 23 12:10:11 crc kubenswrapper[4689]: E0123 12:10:11.195598 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="registry-server" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.195605 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="registry-server" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.195812 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed1a7018-1083-4a78-84e2-40281d48efe3" containerName="registry-server" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.196785 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.199779 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tpqqr" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.199817 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.200074 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.200081 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.208917 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.310259 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.310312 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.310455 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.310585 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.311011 4689 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.311130 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.311176 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfqbp\" (UniqueName: \"kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.311278 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.311315 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.413198 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.413336 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414636 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfqbp\" (UniqueName: \"kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414737 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414781 4689 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414844 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414882 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414914 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.414951 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.415403 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.415397 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.416375 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.416640 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.416946 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.421070 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.422295 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.425746 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.433031 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfqbp\" (UniqueName: \"kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.453602 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " pod="openstack/tempest-tests-tempest" Jan 23 12:10:11 crc kubenswrapper[4689]: I0123 12:10:11.532126 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 23 12:10:12 crc kubenswrapper[4689]: I0123 12:10:12.017265 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 23 12:10:12 crc kubenswrapper[4689]: I0123 12:10:12.039014 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 12:10:13 crc kubenswrapper[4689]: I0123 12:10:13.057501 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"8c315092-e9fe-409e-81ca-39ac98b9fea6","Type":"ContainerStarted","Data":"db62063870d5ffdb61507896753cb535482a06261abeb5c383bb78aaf8771dd2"} Jan 23 12:10:33 crc kubenswrapper[4689]: I0123 12:10:33.311659 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:10:33 crc kubenswrapper[4689]: I0123 12:10:33.312477 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:11:00 crc kubenswrapper[4689]: E0123 12:11:00.136682 4689 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Jan 23 12:11:00 crc kubenswrapper[4689]: E0123 12:11:00.138293 4689 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vfqbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(8c315092-e9fe-409e-81ca-39ac98b9fea6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 12:11:00 crc kubenswrapper[4689]: E0123 12:11:00.139525 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="8c315092-e9fe-409e-81ca-39ac98b9fea6" Jan 23 12:11:00 crc kubenswrapper[4689]: E0123 12:11:00.986434 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="8c315092-e9fe-409e-81ca-39ac98b9fea6" Jan 23 12:11:03 crc kubenswrapper[4689]: I0123 12:11:03.310542 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:11:03 crc kubenswrapper[4689]: I0123 12:11:03.310863 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:11:03 crc kubenswrapper[4689]: I0123 12:11:03.310907 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 12:11:03 crc kubenswrapper[4689]: I0123 12:11:03.311809 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 12:11:03 crc kubenswrapper[4689]: I0123 12:11:03.311872 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" gracePeriod=600 Jan 23 12:11:03 crc kubenswrapper[4689]: E0123 12:11:03.448041 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:11:04 crc kubenswrapper[4689]: I0123 12:11:04.015297 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" exitCode=0 Jan 23 12:11:04 crc kubenswrapper[4689]: I0123 12:11:04.015341 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51"} Jan 23 12:11:04 crc kubenswrapper[4689]: I0123 12:11:04.016089 4689 scope.go:117] "RemoveContainer" containerID="206a1b16f1604cff0d07556ce63f0cdaba1e0a91957274881f21375633285d2a" Jan 23 12:11:04 crc kubenswrapper[4689]: I0123 
12:11:04.016769 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:11:04 crc kubenswrapper[4689]: E0123 12:11:04.017089 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:11:12 crc kubenswrapper[4689]: I0123 12:11:12.446673 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 23 12:11:14 crc kubenswrapper[4689]: I0123 12:11:14.188651 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"8c315092-e9fe-409e-81ca-39ac98b9fea6","Type":"ContainerStarted","Data":"ec83d2b63893c9b67e4f56af15a710ca5f53141c2ccfe908c8ba8da2348c3a8b"} Jan 23 12:11:14 crc kubenswrapper[4689]: I0123 12:11:14.281096 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.875947707 podStartE2EDuration="1m4.281078073s" podCreationTimestamp="2026-01-23 12:10:10 +0000 UTC" firstStartedPulling="2026-01-23 12:10:12.038802491 +0000 UTC m=+4876.663482340" lastFinishedPulling="2026-01-23 12:11:12.443932847 +0000 UTC m=+4937.068612706" observedRunningTime="2026-01-23 12:11:14.276414538 +0000 UTC m=+4938.901094417" watchObservedRunningTime="2026-01-23 12:11:14.281078073 +0000 UTC m=+4938.905757932" Jan 23 12:11:18 crc kubenswrapper[4689]: I0123 12:11:18.640720 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:11:18 crc kubenswrapper[4689]: E0123 12:11:18.641533 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:11:29 crc kubenswrapper[4689]: I0123 12:11:29.640559 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:11:29 crc kubenswrapper[4689]: E0123 12:11:29.641341 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:11:44 crc kubenswrapper[4689]: I0123 12:11:44.640780 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:11:44 crc kubenswrapper[4689]: E0123 12:11:44.641717 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.322875 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.325646 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.336799 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.486657 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.487100 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpdmn\" (UniqueName: \"kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.487182 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.589965 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpdmn\" (UniqueName: \"kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.590054 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.590197 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.590774 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " 
pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.591343 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.614954 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpdmn\" (UniqueName: \"kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn\") pod \"community-operators-2r4zh\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:45 crc kubenswrapper[4689]: I0123 12:11:45.665086 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:46 crc kubenswrapper[4689]: I0123 12:11:46.694441 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:11:47 crc kubenswrapper[4689]: I0123 12:11:47.611627 4689 generic.go:334] "Generic (PLEG): container finished" podID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerID="5f387a5ff0bd94601ede0c008a13a45dad6b81bd7554703398a22e2453f434f8" exitCode=0 Jan 23 12:11:47 crc kubenswrapper[4689]: I0123 12:11:47.611864 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerDied","Data":"5f387a5ff0bd94601ede0c008a13a45dad6b81bd7554703398a22e2453f434f8"} Jan 23 12:11:47 crc kubenswrapper[4689]: I0123 12:11:47.612017 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerStarted","Data":"3031b538b98db850727355a62b4b9a2f8ae8f6e3e764e68a9a45e3f7e888b3dd"} Jan 23 12:11:49 crc kubenswrapper[4689]: I0123 12:11:49.664627 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerStarted","Data":"56fc768038f3d6efa4a33bcbf7e0589569978af72f6da1bfdd23f44fd1ca4c40"} Jan 23 12:11:51 crc kubenswrapper[4689]: I0123 12:11:51.683562 4689 generic.go:334] "Generic (PLEG): container finished" podID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerID="56fc768038f3d6efa4a33bcbf7e0589569978af72f6da1bfdd23f44fd1ca4c40" exitCode=0 Jan 23 12:11:51 crc kubenswrapper[4689]: I0123 12:11:51.683792 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerDied","Data":"56fc768038f3d6efa4a33bcbf7e0589569978af72f6da1bfdd23f44fd1ca4c40"} Jan 23 12:11:52 crc kubenswrapper[4689]: I0123 12:11:52.695680 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerStarted","Data":"fdf2164caab3cd954c67a0483a62c0286863aa3425d2cefdf15a904835845d58"} Jan 23 12:11:52 crc kubenswrapper[4689]: I0123 12:11:52.715070 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2r4zh" podStartSLOduration=3.118911795 
podStartE2EDuration="7.715051805s" podCreationTimestamp="2026-01-23 12:11:45 +0000 UTC" firstStartedPulling="2026-01-23 12:11:47.613976366 +0000 UTC m=+4972.238656225" lastFinishedPulling="2026-01-23 12:11:52.210116376 +0000 UTC m=+4976.834796235" observedRunningTime="2026-01-23 12:11:52.712869411 +0000 UTC m=+4977.337549270" watchObservedRunningTime="2026-01-23 12:11:52.715051805 +0000 UTC m=+4977.339731664" Jan 23 12:11:55 crc kubenswrapper[4689]: I0123 12:11:55.665908 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:55 crc kubenswrapper[4689]: I0123 12:11:55.666579 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:11:56 crc kubenswrapper[4689]: I0123 12:11:56.732922 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-2r4zh" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="registry-server" probeResult="failure" output=< Jan 23 12:11:56 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:11:56 crc kubenswrapper[4689]: > Jan 23 12:11:59 crc kubenswrapper[4689]: I0123 12:11:59.641222 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:11:59 crc kubenswrapper[4689]: E0123 12:11:59.671470 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:12:05 crc kubenswrapper[4689]: I0123 12:12:05.732769 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:12:05 crc kubenswrapper[4689]: I0123 12:12:05.794987 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:12:05 crc kubenswrapper[4689]: I0123 12:12:05.984531 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:12:06 crc kubenswrapper[4689]: I0123 12:12:06.890866 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2r4zh" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="registry-server" containerID="cri-o://fdf2164caab3cd954c67a0483a62c0286863aa3425d2cefdf15a904835845d58" gracePeriod=2 Jan 23 12:12:07 crc kubenswrapper[4689]: I0123 12:12:07.959277 4689 generic.go:334] "Generic (PLEG): container finished" podID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerID="fdf2164caab3cd954c67a0483a62c0286863aa3425d2cefdf15a904835845d58" exitCode=0 Jan 23 12:12:07 crc kubenswrapper[4689]: I0123 12:12:07.959544 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerDied","Data":"fdf2164caab3cd954c67a0483a62c0286863aa3425d2cefdf15a904835845d58"} Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.343430 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.441885 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content\") pod \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.442139 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities\") pod \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.442191 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpdmn\" (UniqueName: \"kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn\") pod \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\" (UID: \"b59050fc-c6bd-427e-a99f-3f2d920d4e6d\") " Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.444179 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities" (OuterVolumeSpecName: "utilities") pod "b59050fc-c6bd-427e-a99f-3f2d920d4e6d" (UID: "b59050fc-c6bd-427e-a99f-3f2d920d4e6d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.454854 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn" (OuterVolumeSpecName: "kube-api-access-cpdmn") pod "b59050fc-c6bd-427e-a99f-3f2d920d4e6d" (UID: "b59050fc-c6bd-427e-a99f-3f2d920d4e6d"). InnerVolumeSpecName "kube-api-access-cpdmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.514483 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b59050fc-c6bd-427e-a99f-3f2d920d4e6d" (UID: "b59050fc-c6bd-427e-a99f-3f2d920d4e6d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.547780 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.547812 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.547822 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpdmn\" (UniqueName: \"kubernetes.io/projected/b59050fc-c6bd-427e-a99f-3f2d920d4e6d-kube-api-access-cpdmn\") on node \"crc\" DevicePath \"\"" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.971441 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2r4zh" event={"ID":"b59050fc-c6bd-427e-a99f-3f2d920d4e6d","Type":"ContainerDied","Data":"3031b538b98db850727355a62b4b9a2f8ae8f6e3e764e68a9a45e3f7e888b3dd"} Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.971495 4689 scope.go:117] "RemoveContainer" containerID="fdf2164caab3cd954c67a0483a62c0286863aa3425d2cefdf15a904835845d58" Jan 23 12:12:08 crc kubenswrapper[4689]: I0123 12:12:08.971497 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2r4zh" Jan 23 12:12:09 crc kubenswrapper[4689]: I0123 12:12:09.001455 4689 scope.go:117] "RemoveContainer" containerID="56fc768038f3d6efa4a33bcbf7e0589569978af72f6da1bfdd23f44fd1ca4c40" Jan 23 12:12:09 crc kubenswrapper[4689]: I0123 12:12:09.022457 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:12:09 crc kubenswrapper[4689]: I0123 12:12:09.034965 4689 scope.go:117] "RemoveContainer" containerID="5f387a5ff0bd94601ede0c008a13a45dad6b81bd7554703398a22e2453f434f8" Jan 23 12:12:09 crc kubenswrapper[4689]: I0123 12:12:09.042982 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2r4zh"] Jan 23 12:12:09 crc kubenswrapper[4689]: I0123 12:12:09.656393 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" path="/var/lib/kubelet/pods/b59050fc-c6bd-427e-a99f-3f2d920d4e6d/volumes" Jan 23 12:12:13 crc kubenswrapper[4689]: I0123 12:12:13.640812 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:12:13 crc kubenswrapper[4689]: E0123 12:12:13.641422 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:12:24 crc kubenswrapper[4689]: I0123 12:12:24.795816 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:12:24 crc kubenswrapper[4689]: E0123 12:12:24.796833 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:12:31 crc kubenswrapper[4689]: I0123 12:12:31.355900 4689 scope.go:117] "RemoveContainer" containerID="49511488ae79290fd7c7bf1c1b066b7144707bae93e52c117849446a31a2e982" Jan 23 12:12:31 crc kubenswrapper[4689]: I0123 12:12:31.420765 4689 scope.go:117] "RemoveContainer" containerID="e25f531e28ef1ac692234b09415761e6a7d1af331fbbd04d7b1de59b0dae0d7f" Jan 23 12:12:31 crc kubenswrapper[4689]: I0123 12:12:31.499328 4689 scope.go:117] "RemoveContainer" containerID="16c96ba1b01194f5ab2f103ee56c2003c3a85b8de8682ce44cde38ef92ec45c6" Jan 23 12:12:31 crc kubenswrapper[4689]: I0123 12:12:31.762835 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:12:33 crc kubenswrapper[4689]: I0123 12:12:33.776725 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:12:33 crc kubenswrapper[4689]: I0123 12:12:33.777031 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.518549 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded" start-of-body= Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.519733 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded" Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.525330 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" start-of-body= Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.525395 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.541328 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.777222 4689 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:12:35 crc kubenswrapper[4689]: I0123 12:12:35.777682 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:12:37 crc kubenswrapper[4689]: I0123 12:12:37.644946 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:12:37 crc kubenswrapper[4689]: E0123 12:12:37.646706 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:12:50 crc kubenswrapper[4689]: I0123 12:12:50.643112 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:12:50 crc kubenswrapper[4689]: E0123 12:12:50.645559 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:13:00 crc kubenswrapper[4689]: I0123 12:13:00.918381 4689 patch_prober.go:28] interesting pod/metrics-server-56b6c6f75d-w9wz2 container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:00 crc kubenswrapper[4689]: I0123 12:13:00.920507 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:01 crc kubenswrapper[4689]: I0123 12:13:01.288829 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:01 crc kubenswrapper[4689]: I0123 12:13:01.289536 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:01 crc kubenswrapper[4689]: I0123 12:13:01.761322 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" 
podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:05 crc kubenswrapper[4689]: I0123 12:13:05.522209 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:05 crc kubenswrapper[4689]: I0123 12:13:05.523195 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:05 crc kubenswrapper[4689]: I0123 12:13:05.532915 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:05 crc kubenswrapper[4689]: I0123 12:13:05.532979 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:05 crc kubenswrapper[4689]: I0123 12:13:05.653727 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:13:05 crc kubenswrapper[4689]: E0123 12:13:05.655527 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:13:06 crc kubenswrapper[4689]: I0123 12:13:06.998291 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:06 crc kubenswrapper[4689]: I0123 12:13:06.998370 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.998572 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting 
headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.998617 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.999424 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.999469 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.999822 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:06.999876 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.457769 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.458197 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.810663 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.810729 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.811099 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:07 crc kubenswrapper[4689]: I0123 12:13:07.811267 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:08 crc kubenswrapper[4689]: I0123 12:13:08.091484 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:08 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:08 crc kubenswrapper[4689]: > Jan 23 12:13:08 crc kubenswrapper[4689]: I0123 12:13:08.091500 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:08 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:08 crc kubenswrapper[4689]: > Jan 23 12:13:08 crc kubenswrapper[4689]: I0123 12:13:08.777544 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:08 crc kubenswrapper[4689]: I0123 12:13:08.777596 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.243598 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.243793 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.331437 
4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.331483 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.413390 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.413637 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.507523 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.507894 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.525964 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.526110 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.641717 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": 
net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.641800 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.773529 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.773607 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.774070 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.774171 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.774170 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.774195 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.933478 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.996599 4689 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:09 crc kubenswrapper[4689]: I0123 12:13:09.996641 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.078294 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.078398 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.078475 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.078484 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.078544 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.411386 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.411775 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting 
headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.518368 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.518440 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.518502 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.518540 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.526512 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.526580 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.582800 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.583076 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.583282 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.582819 4689 prober.go:107] "Probe failed" probeType="Liveness" 
pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.582872 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.52:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.583348 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.641862 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.641916 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.662414 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.662491 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.723456 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.723853 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout 
exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.724629 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.724688 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.776645 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.777824 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.778717 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.791340 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.791404 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.955405 4689 patch_prober.go:28] interesting pod/metrics-server-56b6c6f75d-w9wz2 container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.955470 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.955412 4689 patch_prober.go:28] interesting pod/metrics-server-56b6c6f75d-w9wz2 container/metrics-server namespace/openshift-monitoring: Readiness probe 
status=failure output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:10 crc kubenswrapper[4689]: I0123 12:13:10.955522 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.072310 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.072361 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.289272 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.289338 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.518096 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.518174 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.518207 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.55:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.518278 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" 
podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.525841 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.57:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.525914 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.525997 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.57:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.526027 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.662018 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.58:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.662456 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.724038 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.724118 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.761372 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" 
output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.777489 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.790903 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.80:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.790995 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.926605 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.926633 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.926672 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:11 crc kubenswrapper[4689]: I0123 12:13:11.926649 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.058425 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.058442 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect 
service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.058550 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.058571 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.181389 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.182357 4689 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-2r5kt container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.182481 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podUID="83cfd8ec-2928-4cd8-a14c-330cce17bfd5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.205309 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.205802 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:12 crc kubenswrapper[4689]: > Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.472379 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.472413 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.472436 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:12 crc kubenswrapper[4689]: I0123 12:13:12.472417 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.047412 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.047542 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.358409 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.358739 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.785640 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.785648 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:13 crc kubenswrapper[4689]: I0123 12:13:13.797734 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:14 crc 
kubenswrapper[4689]: I0123 12:13:14.001614 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.001696 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.001702 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.001792 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.041158 4689 trace.go:236] Trace[1714810045]: "Calculate volume metrics of storage for pod minio-dev/minio" (23-Jan-2026 12:13:09.113) (total time: 4892ms): Jan 23 12:13:14 crc kubenswrapper[4689]: Trace[1714810045]: [4.892205009s] [4.892205009s] END Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.041202 4689 trace.go:236] Trace[1567532689]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-1" (23-Jan-2026 12:13:08.835) (total time: 5170ms): Jan 23 12:13:14 crc kubenswrapper[4689]: Trace[1567532689]: [5.170134835s] [5.170134835s] END Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.041171 4689 trace.go:236] Trace[1664549783]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (23-Jan-2026 12:13:08.092) (total time: 5912ms): Jan 23 12:13:14 crc kubenswrapper[4689]: Trace[1664549783]: [5.9128675s] [5.9128675s] END Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.057593 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.057646 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.062935 4689 patch_prober.go:28] 
interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.062967 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.408452 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:14 crc kubenswrapper[4689]: timeout: health rpc did not complete within 1s Jan 23 12:13:14 crc kubenswrapper[4689]: > Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.408452 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:14 crc kubenswrapper[4689]: timeout: health rpc did not complete within 1s Jan 23 12:13:14 crc kubenswrapper[4689]: > Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.777529 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.778090 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.971047 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8080/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:14 crc kubenswrapper[4689]: I0123 12:13:14.971073 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8081/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.106747 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.506242 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: 
Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.506284 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.506308 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.506413 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.525992 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.526100 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587337 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587390 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587527 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587546 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get 
\"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587591 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587603 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587642 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587705 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587760 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.587806 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.780970 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.781081 4689 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-9gvwz container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.781365 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" 
output="command timed out" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.782270 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.782776 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.783866 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:15 crc kubenswrapper[4689]: I0123 12:13:15.784690 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" podUID="9d058744-ff52-4a7c-8e44-86c81270e7d1" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.036371 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.168634 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.169305 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.168691 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.169572 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.229545 4689 patch_prober.go:28] 
interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.229617 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.229642 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.229875 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411398 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411427 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411486 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411558 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411597 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout 
exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411601 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411619 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.411672 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.491100 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.491170 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.491564 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.491503 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.803647 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.803716 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" 
podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.803642 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.803879 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.944808 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:16 crc kubenswrapper[4689]: I0123 12:13:16.944827 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.457761 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.457844 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.488605 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.488688 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 
12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.488633 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.488830 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.810914 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.810982 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.811042 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:17 crc kubenswrapper[4689]: I0123 12:13:17.810982 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:18 crc kubenswrapper[4689]: I0123 12:13:18.459416 4689 patch_prober.go:28] interesting pod/console-5859dc97b8-47f8f container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:18 crc kubenswrapper[4689]: I0123 12:13:18.459713 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:18 crc kubenswrapper[4689]: I0123 12:13:18.779520 4689 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:18 crc kubenswrapper[4689]: I0123 12:13:18.780008 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:18 crc kubenswrapper[4689]: I0123 12:13:18.782230 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.052565 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.097364 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" podUID="1f6a7f15-609b-414e-8119-366afe98811f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.202371 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.289331 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.391408 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.433404 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.433495 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 
crc kubenswrapper[4689]: I0123 12:13:19.485357 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.485428 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.485489 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.485508 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.485510 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.525481 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.525541 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.578702 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" podUID="5f4d15d8-f941-4082-ab51-3ecda5527f9b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.658320 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podUID="2bd7a193-5394-452e-9315-0332e4a4e667" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.658757 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.658811 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773545 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773609 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773642 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773701 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773775 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.773798 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 
23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.777381 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="ovnkube-controller" probeResult="failure" output="command timed out" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.815428 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:19 crc kubenswrapper[4689]: I0123 12:13:19.891440 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.015491 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.056382 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.056748 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.056782 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.056819 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.17:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.056832 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.097316 
4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.246365 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.246465 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.372408 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.442364 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podUID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.582358 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.582424 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623348 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623390 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled while waiting for 
connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623429 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623435 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623479 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623490 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623510 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.623528 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.624498 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" podUID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.643035 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:13:20 crc kubenswrapper[4689]: E0123 12:13:20.643836 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" 
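The "back-off 5m0s restarting failed container" error just above is the kubelet's crash-loop back-off at its ceiling: the restart delay starts at 10s, doubles after each crash, and is capped at five minutes. A standalone sketch of that schedule (illustration only; the 10s start and 5m cap are the kubelet's documented defaults):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 10 * time.Second        // initial back-off after the first crash
        const maxDelay = 5 * time.Minute // the ceiling reported in the log message
        for restart := 1; restart <= 7; restart++ {
            fmt.Printf("restart %d: wait %v\n", restart, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
        // From restart 6 onward the wait pins at 5m0s, matching "back-off 5m0s" above.
    }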
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.661935 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.662006 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.724138 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.724216 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.778315 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.778432 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.780713 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.781377 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.781711 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.781786 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.783717 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef"} pod="openstack/ceilometer-0" containerMessage="Container 
ceilometer-central-agent failed liveness probe, will be restarted"
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.784093 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" containerID="cri-o://9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef" gracePeriod=30
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.792037 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.792108 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.873617 4689 patch_prober.go:28] interesting pod/metrics-server-56b6c6f75d-w9wz2 container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.873957 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.874012 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2"
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.876715 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="metrics-server" containerStatusID={"Type":"cri-o","ID":"38aa0cd5f5288400c6515cf6a9235754c9a90fa712fa7bcc350799d57840292e"} pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" containerMessage="Container metrics-server failed liveness probe, will be restarted"
Jan 23 12:13:20 crc kubenswrapper[4689]: I0123 12:13:20.876784 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" containerID="cri-o://38aa0cd5f5288400c6515cf6a9235754c9a90fa712fa7bcc350799d57840292e" gracePeriod=170
Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.073404 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
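When a liveness probe fails enough consecutive times, the kubelet asks the runtime to restart the container, as in the "Killing container with a grace period" entries above (gracePeriod=30 for ceilometer-central-agent, 170 for metrics-server, and 2 for the frr container shortly below): the process first receives SIGTERM, and is sent SIGKILL only if it is still running when the grace period lapses. A container that exits on the SIGTERM reports exit code 143 (128 + 15), which is exactly what the frr container shows in the later "container finished ... exitCode=143" entry. A standalone sketch of the stop sequence, assuming a Unix host and using plain os/exec in place of the CRI:

    package main

    import (
        "os/exec"
        "syscall"
        "time"
    )

    // stopWithGrace mimics the runtime's stop: SIGTERM first, SIGKILL only if
    // the process outlives the grace period.
    func stopWithGrace(cmd *exec.Cmd, grace time.Duration) {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()

        cmd.Process.Signal(syscall.SIGTERM) // polite stop; exit code 143 if honored
        select {
        case <-done: // exited inside the grace period
        case <-time.After(grace):
            cmd.Process.Kill() // escalate to SIGKILL; exit code becomes 137
            <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60") // stands in for a container process
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        stopWithGrace(cmd, 2*time.Second) // gracePeriod=2, as for frr-k8s-pkdqh
    }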
"Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.181939 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.182063 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.182000 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.182626 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.288989 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.289058 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.289159 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925438 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925491 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" 
probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925475 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925600 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925608 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.925438 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.927381 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr" containerStatusID={"Type":"cri-o","ID":"0bbf2301f466f95f4024c28175d4df0efc1cfdd968533398bbb13b728a6de448"} pod="metallb-system/frr-k8s-pkdqh" containerMessage="Container frr failed liveness probe, will be restarted" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.927614 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" containerID="cri-o://0bbf2301f466f95f4024c28175d4df0efc1cfdd968533398bbb13b728a6de448" gracePeriod=2 Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.943781 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:21 crc kubenswrapper[4689]: I0123 12:13:21.943963 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.162355 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.162394 4689 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.162348 4689 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-2r5kt container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.162449 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podUID="83cfd8ec-2928-4cd8-a14c-330cce17bfd5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.290292 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.290352 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472391 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472421 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472453 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472822 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout 
exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472478 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472881 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472491 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.472926 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.781835 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.782293 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.942391 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.942444 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.942459 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Liveness probe status=failure output="Get \"http://10.217.0.49:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" 
start-of-body= Jan 23 12:13:22 crc kubenswrapper[4689]: I0123 12:13:22.942533 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.045344 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.045409 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.357363 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.357403 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.742523 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.742693 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.777666 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.777695 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.777833 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" 
podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.780290 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="sbdb" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.780305 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.780400 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:23 crc kubenswrapper[4689]: I0123 12:13:23.780405 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="nbdb" probeResult="failure" output="command timed out" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.002929 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.002996 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.058659 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.058932 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.058660 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.058980 4689 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.317550 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.317569 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.317624 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.317634 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.339617 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerDied","Data":"0bbf2301f466f95f4024c28175d4df0efc1cfdd968533398bbb13b728a6de448"} Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.340005 4689 generic.go:334] "Generic (PLEG): container finished" podID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerID="0bbf2301f466f95f4024c28175d4df0efc1cfdd968533398bbb13b728a6de448" exitCode=143 Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.765330 4689 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkpnx container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.765379 4689 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkpnx container/openshift-apiserver namespace/openshift-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.14:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.765420 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" podUID="05112afa-e07a-4342-9d37-2a6b9b7ac9a2" 
containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.765383 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" podUID="05112afa-e07a-4342-9d37-2a6b9b7ac9a2" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: E0123 12:13:24.771161 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T12:13:14Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T12:13:14Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T12:13:14Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T12:13:14Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.970448 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8080/livez\": context deadline exceeded" Jan 23 12:13:24 crc kubenswrapper[4689]: I0123 12:13:24.970532 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8081/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.417522 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.417589 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.417873 4689 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.418091 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.458464 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.458546 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.458550 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.458714 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.459393 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.459436 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.463220 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"2cc243468ad8665b4783027e70c09b12e489c4842e28679a5bcbefdbef595e4b"} pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.463767 4689 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" containerID="cri-o://2cc243468ad8665b4783027e70c09b12e489c4842e28679a5bcbefdbef595e4b" gracePeriod=30 Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.518608 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.518682 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.525019 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.525056 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.559980 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.560067 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595772 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595837 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595775 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 
23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595782 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595906 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595801 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.595938 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.774464 4689 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-9gvwz container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.774522 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" podUID="9d058744-ff52-4a7c-8e44-86c81270e7d1" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.774461 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.774460 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965600 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" 
podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965624 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965612 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965666 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965717 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965739 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965694 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.965615 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:25 crc kubenswrapper[4689]: I0123 12:13:25.966350 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.078339 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.078339 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.167746 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: 
request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.168090 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.167749 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.168144 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.229701 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.229710 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.229851 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.229763 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410322 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410360 4689 patch_prober.go:28] 
interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410386 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410399 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410423 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410444 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410367 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.410474 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.465238 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.465299 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection 
(Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.491713 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.491768 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.491815 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.491842 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.491946 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.492079 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.689436 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="catalog-operator" containerStatusID={"Type":"cri-o","ID":"3de974a3877b9a09e6527379e5e4d1e09cdb486c9d914572bfd81537fbde208f"} pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" containerMessage="Container catalog-operator failed liveness probe, will be restarted" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.689544 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" containerID="cri-o://3de974a3877b9a09e6527379e5e4d1e09cdb486c9d914572bfd81537fbde208f" gracePeriod=30 Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.781840 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.781844 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" 
containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.802299 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.802374 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.802395 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.802493 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.802431 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.806736 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller-manager" containerStatusID={"Type":"cri-o","ID":"454a4c74dbb6a1652d2c0e501a9c5aa9bf1591e3de67f7eb8f95d1bf4f84cab4"} pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" containerMessage="Container controller-manager failed liveness probe, will be restarted" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.806818 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" containerID="cri-o://454a4c74dbb6a1652d2c0e501a9c5aa9bf1591e3de67f7eb8f95d1bf4f84cab4" gracePeriod=30 Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.943839 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.943965 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:26 crc kubenswrapper[4689]: I0123 12:13:26.944071 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458524 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.74:9091/-/healthy\": context deadline exceeded" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458839 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/healthy\": context deadline exceeded" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458518 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458693 4689 patch_prober.go:28] interesting pod/console-5859dc97b8-47f8f container/console namespace/openshift-console: Liveness probe status=failure output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458965 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.458996 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.459062 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.460120 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="console" containerStatusID={"Type":"cri-o","ID":"466f16cbff91ecef476fbf44e262293aa570836a2c90bc368fc4dc6e34942c0b"} pod="openshift-console/console-5859dc97b8-47f8f" containerMessage="Container console failed liveness probe, will be restarted" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.460743 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.460772 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.489924 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.490187 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.490073 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.490452 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.781739 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.781791 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.781726 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.805474 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"f89580cd295153596a0d3fe582292a8a37eaf502afd2a0f7cb089816af778a52"} Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.809967 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:27 
crc kubenswrapper[4689]: I0123 12:13:27.810027 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.812126 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": context deadline exceeded" start-of-body= Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.812213 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": context deadline exceeded" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.812260 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.813347 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="route-controller-manager" containerStatusID={"Type":"cri-o","ID":"1310f2493d20a187538f0e0e33f0c733c42bb03aef4a7942b922c34994a6316c"} pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" containerMessage="Container route-controller-manager failed liveness probe, will be restarted" Jan 23 12:13:27 crc kubenswrapper[4689]: I0123 12:13:27.813407 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" containerID="cri-o://1310f2493d20a187538f0e0e33f0c733c42bb03aef4a7942b922c34994a6316c" gracePeriod=30 Jan 23 12:13:28 crc kubenswrapper[4689]: I0123 12:13:28.364383 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:28 crc kubenswrapper[4689]: I0123 12:13:28.364745 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:28 crc kubenswrapper[4689]: I0123 12:13:28.502474 4689 patch_prober.go:28] interesting pod/console-5859dc97b8-47f8f container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:28 crc kubenswrapper[4689]: I0123 12:13:28.502550 4689 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.136:8443/health\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.091379 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.091424 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.174571 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" podUID="1f6a7f15-609b-414e-8119-366afe98811f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.174780 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" podUID="1f6a7f15-609b-414e-8119-366afe98811f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.256323 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.256420 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.256435 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.297480 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.297615 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.339341 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.464355 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.590556 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.590818 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.590938 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.632645 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podUID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.673369 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podUID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.673668 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.715886 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.716009 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.716033 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.716105 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.715779 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.716853 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.780065 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.780163 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.800372 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" podUID="5f4d15d8-f941-4082-ab51-3ecda5527f9b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.800477 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.882316 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:29 crc 
kubenswrapper[4689]: I0123 12:13:29.883100 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.883215 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.965510 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.965576 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.965624 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.966040 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.966243 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.966274 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.966361 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 12:13:29.967180 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="operator" containerStatusID={"Type":"cri-o","ID":"69e3263f615b7a5c939fd2604f8e31d807c6746b23f541b888f7e11cf09c7d2e"} pod="openshift-operators/observability-operator-59bdc8b94-95bv6" containerMessage="Container operator failed liveness probe, will be restarted" Jan 23 12:13:29 crc kubenswrapper[4689]: I0123 
12:13:29.967625 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" containerID="cri-o://69e3263f615b7a5c939fd2604f8e31d807c6746b23f541b888f7e11cf09c7d2e" gracePeriod=30 Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.050333 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.050334 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" podUID="5f4d15d8-f941-4082-ab51-3ecda5527f9b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.050405 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podUID="2bd7a193-5394-452e-9315-0332e4a4e667" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.134343 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.175348 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.175395 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" podUID="2bd7a193-5394-452e-9315-0332e4a4e667" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.175432 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.175664 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.258602 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: 
Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.258617 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.258673 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.258765 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.259061 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.259271 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.259410 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.342467 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.342490 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.342788 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.342604 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": context deadline 
exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.342831 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.383484 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.383534 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.424942 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.466330 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.466421 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" podUID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.591622 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.720503 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.778265 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.778354 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack/ovn-northd-0" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.778456 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" probeResult="failure" output="command timed out" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.778633 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.779482 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ovn-northd" containerStatusID={"Type":"cri-o","ID":"0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4"} pod="openstack/ovn-northd-0" containerMessage="Container ovn-northd failed liveness probe, will be restarted" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.779535 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" containerID="cri-o://0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" gracePeriod=30 Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798342 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798422 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798497 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798516 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798514 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798503 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="Get \"http://10.217.0.35:9898/healthz\": context deadline exceeded (Client.Timeout 
exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798568 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798579 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798544 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798656 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.798710 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.839748 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.839819 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.839839 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.839889 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.840164 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podUID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" 
containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.840379 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" podUID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.840444 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.840461 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.880492 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.880554 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.880558 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podUID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:30 crc kubenswrapper[4689]: I0123 12:13:30.880658 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:30.965449 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podUID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:30.965553 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while 
waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:30.965575 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:30.965634 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:30.965636 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" podUID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047438 4689 patch_prober.go:28] interesting pod/metrics-server-56b6c6f75d-w9wz2 container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047506 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" podUID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.76:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047594 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047676 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047725 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047777 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while 
awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047796 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.047774 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-api-84b55874c-d6r8c" podUID="a256dd27-3435-4bcb-9ca0-46a0d472325b" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.1.21:8004/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131425 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-79f99b898-dt7zp" podUID="04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.1.22:8000/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131463 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131495 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131509 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-cfnapi-79f99b898-dt7zp" podUID="04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.1.22:8000/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131521 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131465 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-84b55874c-d6r8c" podUID="a256dd27-3435-4bcb-9ca0-46a0d472325b" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.1.21:8004/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.131750 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.133228 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="webhook-server" containerStatusID={"Type":"cri-o","ID":"c09b7791ff52d5b3704d52c0a91c3db861e0537267bebe09479b9a050c7febd4"} 
pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" containerMessage="Container webhook-server failed liveness probe, will be restarted" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219339 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219408 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219382 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219445 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219464 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.219505 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.301412 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" podUID="3369528a-f39f-4e47-92e9-abbca4395b98" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.301506 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.301526 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.384417 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.619554 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" containerID="cri-o://c09b7791ff52d5b3704d52c0a91c3db861e0537267bebe09479b9a050c7febd4" gracePeriod=2 Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.661332 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.661434 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.762227 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.762335 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.763485 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller" containerStatusID={"Type":"cri-o","ID":"c69e86b619f0f860ad22540bd294c686b0bcaff89701ee99aa0ff389609113bd"} pod="metallb-system/frr-k8s-pkdqh" containerMessage="Container controller failed liveness probe, will be restarted" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.763621 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" containerID="cri-o://c69e86b619f0f860ad22540bd294c686b0bcaff89701ee99aa0ff389609113bd" gracePeriod=2 Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.777692 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.777794 4689 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.781939 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.782036 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.782116 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.782412 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.783100 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9"} pod="openstack-operators/openstack-operator-index-d9bfx" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.783158 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" containerID="cri-o://4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" gracePeriod=30 Jan 23 12:13:31 crc kubenswrapper[4689]: E0123 12:13:31.786583 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:31 crc kubenswrapper[4689]: E0123 12:13:31.787989 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:31 crc kubenswrapper[4689]: E0123 12:13:31.789187 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:31 crc kubenswrapper[4689]: E0123 12:13:31.789226 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.930391 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.931399 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.930492 4689 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971474 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971544 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971492 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971617 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971652 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971682 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971715 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971766 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971821 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971832 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971880 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:31 crc kubenswrapper[4689]: I0123 12:13:31.971907 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.121351 4689 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-2r5kt container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.121390 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.121432 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podUID="83cfd8ec-2928-4cd8-a14c-330cce17bfd5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.121510 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.121553 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.173442 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" 
containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.305024 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-z5sn9" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.457942 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.458021 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.474411 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.474437 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.474541 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.474564 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.475569 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller" containerStatusID={"Type":"cri-o","ID":"f7c8f26242835071c3b6ce1bf32e2ae2945dcb1b1f36757408a9a2e51abf8dfc"} pod="metallb-system/controller-6968d8fdc4-mtbb4" containerMessage="Container controller failed liveness probe, will be restarted" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.475638 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" containerID="cri-o://f7c8f26242835071c3b6ce1bf32e2ae2945dcb1b1f36757408a9a2e51abf8dfc" gracePeriod=2 Jan 23 12:13:32 crc kubenswrapper[4689]: E0123 12:13:32.551338 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.772895 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager 
namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.773209 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.778200 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="sbdb" probeResult="failure" output="command timed out" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.790814 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="nbdb" probeResult="failure" output="command timed out" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.876431 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" event={"ID":"f9dd4a75-d3a3-42d6-a305-43e95e450611","Type":"ContainerDied","Data":"3de974a3877b9a09e6527379e5e4d1e09cdb486c9d914572bfd81537fbde208f"} Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.876471 4689 generic.go:334] "Generic (PLEG): container finished" podID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerID="3de974a3877b9a09e6527379e5e4d1e09cdb486c9d914572bfd81537fbde208f" exitCode=0 Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.877274 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr-k8s-webhook-server" containerStatusID={"Type":"cri-o","ID":"3bf3d98f8e059ffe4c404e87f11c0199343c0bf81c244e9eb18ce59ec96e9182"} pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" containerMessage="Container frr-k8s-webhook-server failed liveness probe, will be restarted" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.877329 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" containerID="cri-o://3bf3d98f8e059ffe4c404e87f11c0199343c0bf81c244e9eb18ce59ec96e9182" gracePeriod=10 Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.949403 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="238926d6-2fb4-4759-9ef9-e93cca2c4bb0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.990400 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="238926d6-2fb4-4759-9ef9-e93cca2c4bb0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.990424 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager 
namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:32 crc kubenswrapper[4689]: I0123 12:13:32.990500 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.006385 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.031492 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.088436 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.088754 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.123112 4689 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-2r5kt container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.123598 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podUID="83cfd8ec-2928-4cd8-a14c-330cce17bfd5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.164466 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.260251 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" 
probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.316351 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.316481 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tqgjs" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.316552 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.316625 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/speaker-tqgjs" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.318008 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="speaker" containerStatusID={"Type":"cri-o","ID":"f6e7cb295635c37786202069e1e22490fee04e4ae8622761ad1ce69cf5ad6799"} pod="metallb-system/speaker-tqgjs" containerMessage="Container speaker failed liveness probe, will be restarted" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.318110 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" containerID="cri-o://f6e7cb295635c37786202069e1e22490fee04e4ae8622761ad1ce69cf5ad6799" gracePeriod=2 Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.458322 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.458400 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.516362 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.516450 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.516701 4689 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.641690 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:13:33 crc kubenswrapper[4689]: E0123 12:13:33.643586 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.778063 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.780078 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.780025 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.780129 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.780362 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.780661 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.781227 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.782388 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c"} pod="openstack/openstack-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.783639 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="prometheus" containerStatusID={"Type":"cri-o","ID":"396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18"} pod="openshift-monitoring/prometheus-k8s-0" containerMessage="Container prometheus failed liveness probe, will be restarted" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.783871 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" containerID="cri-o://396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" 
gracePeriod=600 Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.786159 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.786253 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.790626 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.790694 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:13:33 crc kubenswrapper[4689]: E0123 12:13:33.790719 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:33 crc kubenswrapper[4689]: E0123 12:13:33.792090 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:33 crc kubenswrapper[4689]: E0123 12:13:33.798427 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:33 crc kubenswrapper[4689]: E0123 12:13:33.798522 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.887276 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620"} pod="openshift-marketplace/redhat-operators-4zskl" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 23 12:13:33 crc kubenswrapper[4689]: I0123 12:13:33.887669 4689 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" containerID="cri-o://0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" gracePeriod=30 Jan 23 12:13:34 crc kubenswrapper[4689]: E0123 12:13:34.041008 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:34 crc kubenswrapper[4689]: E0123 12:13:34.052170 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:34 crc kubenswrapper[4689]: E0123 12:13:34.060357 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:34 crc kubenswrapper[4689]: E0123 12:13:34.060444 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.318601 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.318977 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.548053 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.548082 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86d7448556-jq64j" podUID="e7fe0b60-2131-41ce-a23d-1ba4eb389afd" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.205:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.548112 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.548138 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-86d7448556-jq64j" podUID="e7fe0b60-2131-41ce-a23d-1ba4eb389afd" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.205:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.550346 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590464 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590482 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590555 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590566 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590567 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-86d7448556-jq64j" podUID="e7fe0b60-2131-41ce-a23d-1ba4eb389afd" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.205:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590604 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590626 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 12:13:34 crc kubenswrapper[4689]: 
I0123 12:13:34.590642 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590673 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590680 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590782 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.590971 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.591090 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-86d7448556-jq64j" podUID="e7fe0b60-2131-41ce-a23d-1ba4eb389afd" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.205:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.606302 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="prometheus-operator-admission-webhook" containerStatusID={"Type":"cri-o","ID":"a395e68b08186aa0babb4b1e0bcaf50412e72b1a503e7f53fee931e31e9a89e5"} pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" containerMessage="Container prometheus-operator-admission-webhook failed liveness probe, will be restarted" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.606906 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" containerID="cri-o://a395e68b08186aa0babb4b1e0bcaf50412e72b1a503e7f53fee931e31e9a89e5" gracePeriod=30 Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.619806 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-scheduler" containerStatusID={"Type":"cri-o","ID":"7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa"} pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" containerMessage="Container kube-scheduler failed liveness probe, will be restarted" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.619918 4689 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" containerID="cri-o://7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa" gracePeriod=30 Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.632344 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.632418 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.632845 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-tqgjs" podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.634980 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.695101 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:6443/livez?exclude=etcd\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.695432 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez?exclude=etcd\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.695175 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.695883 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.736397 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc 
container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.736460 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.765793 4689 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkpnx container/openshift-apiserver namespace/openshift-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.765868 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" podUID="05112afa-e07a-4342-9d37-2a6b9b7ac9a2" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz?exclude=etcd&exclude=etcd-readiness\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.766136 4689 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkpnx container/openshift-apiserver namespace/openshift-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.14:8443/livez?exclude=etcd\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.766595 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-apiserver/apiserver-76f77b778f-gkpnx" podUID="05112afa-e07a-4342-9d37-2a6b9b7ac9a2" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez?exclude=etcd\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: E0123 12:13:34.772792 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.786189 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.972648 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8080/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.972750 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Jan 23 12:13:34 crc 
kubenswrapper[4689]: I0123 12:13:34.973283 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8081/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.973487 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.975067 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"7c6024d1d94ca989e358842d0ac3f562b67ec8b1144437bb09e0cabf1ebfe4fb"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.975143 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" containerID="cri-o://7c6024d1d94ca989e358842d0ac3f562b67ec8b1144437bb09e0cabf1ebfe4fb" gracePeriod=30 Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.984864 4689 generic.go:334] "Generic (PLEG): container finished" podID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerID="c09b7791ff52d5b3704d52c0a91c3db861e0537267bebe09479b9a050c7febd4" exitCode=137 Jan 23 12:13:34 crc kubenswrapper[4689]: I0123 12:13:34.985067 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" event={"ID":"6087eb3b-66c0-4d14-a5de-008f086a59ee","Type":"ContainerDied","Data":"c09b7791ff52d5b3704d52c0a91c3db861e0537267bebe09479b9a050c7febd4"} Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.418382 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.418930 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.419081 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.419827 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.419883 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" 
podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.419926 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.421725 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="console-operator" containerStatusID={"Type":"cri-o","ID":"e3da4255118290d8cb3a7be69771468a1e1d0e106c873e2be77ab1cd10e1d1b9"} pod="openshift-console-operator/console-operator-58897d9998-zhh2t" containerMessage="Container console-operator failed liveness probe, will be restarted" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.421857 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" containerID="cri-o://e3da4255118290d8cb3a7be69771468a1e1d0e106c873e2be77ab1cd10e1d1b9" gracePeriod=30 Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.518091 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.518136 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.518179 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.518205 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.525707 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.525758 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": context deadline exceeded" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.525727 4689 patch_prober.go:28] interesting 
pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": context deadline exceeded" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.525803 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": context deadline exceeded" Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.549504 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.553818 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.554921 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.554976 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.555039 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.555141 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.555211 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.556123 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555"} pod="openshift-console/downloads-7954f5f757-hc5js" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.556231 4689 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" containerID="cri-o://d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555" gracePeriod=2 Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597409 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597487 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597587 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597585 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597612 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597630 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597647 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.597691 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.631847 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded 
while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.632098 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.775361 4689 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-9gvwz container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.775429 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" podUID="9d058744-ff52-4a7c-8e44-86c81270e7d1" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.775429 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.775497 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.775555 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.779428 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.779584 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.780210 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="authentication-operator" containerStatusID={"Type":"cri-o","ID":"3228ffcb48d7c3d7668df8877738301ff810d2f4229fe537aba32e0352d31988"} pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" containerMessage="Container authentication-operator failed liveness probe, will be restarted" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.780297 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" podUID="9d058744-ff52-4a7c-8e44-86c81270e7d1" containerName="authentication-operator" containerID="cri-o://3228ffcb48d7c3d7668df8877738301ff810d2f4229fe537aba32e0352d31988" 
gracePeriod=30 Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.782189 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.782258 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.782813 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.782926 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.783297 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988"} pod="openshift-marketplace/community-operators-4hdbj" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.783343 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" containerID="cri-o://0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" gracePeriod=30 Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.783527 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.783559 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.784263 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.790317 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.790369 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.790465 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.810605 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code 
-1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.819961 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.821560 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:35 crc kubenswrapper[4689]: E0123 12:13:35.821625 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.995587 4689 generic.go:334] "Generic (PLEG): container finished" podID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerID="3bf3d98f8e059ffe4c404e87f11c0199343c0bf81c244e9eb18ce59ec96e9182" exitCode=0 Jan 23 12:13:35 crc kubenswrapper[4689]: I0123 12:13:35.995748 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" event={"ID":"3e9ac503-1ccb-4008-866b-0e6e5a11227d","Type":"ContainerDied","Data":"3bf3d98f8e059ffe4c404e87f11c0199343c0bf81c244e9eb18ce59ec96e9182"} Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.003822 4689 trace.go:236] Trace[964829125]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-compactor-0" (23-Jan-2026 12:13:18.560) (total time: 17438ms): Jan 23 12:13:36 crc kubenswrapper[4689]: Trace[964829125]: [17.438234126s] [17.438234126s] END Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.014348 4689 generic.go:334] "Generic (PLEG): container finished" podID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerID="c69e86b619f0f860ad22540bd294c686b0bcaff89701ee99aa0ff389609113bd" exitCode=137 Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.014414 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerDied","Data":"c69e86b619f0f860ad22540bd294c686b0bcaff89701ee99aa0ff389609113bd"} Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.017041 4689 generic.go:334] "Generic (PLEG): container finished" podID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerID="7c6024d1d94ca989e358842d0ac3f562b67ec8b1144437bb09e0cabf1ebfe4fb" exitCode=2 Jan 23 12:13:36 crc 
kubenswrapper[4689]: I0123 12:13:36.017095 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db6cec49-030a-4282-a9a4-890f2783c0e5","Type":"ContainerDied","Data":"7c6024d1d94ca989e358842d0ac3f562b67ec8b1144437bb09e0cabf1ebfe4fb"} Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.019776 4689 generic.go:334] "Generic (PLEG): container finished" podID="4ab08845-476b-4601-9385-bbec37b18e35" containerID="f7c8f26242835071c3b6ce1bf32e2ae2945dcb1b1f36757408a9a2e51abf8dfc" exitCode=137 Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.019866 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtbb4" event={"ID":"4ab08845-476b-4601-9385-bbec37b18e35","Type":"ContainerDied","Data":"f7c8f26242835071c3b6ce1bf32e2ae2945dcb1b1f36757408a9a2e51abf8dfc"} Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.020726 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8"} pod="openstack/openstack-cell1-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.046371 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.046478 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168265 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168361 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168370 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168437 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168460 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" 
containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.168569 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.169707 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="olm-operator" containerStatusID={"Type":"cri-o","ID":"111c929bd1b9db70f524544791cc6fbb0fb391a0342bc4ef07d2d96d46ba3dab"} pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" containerMessage="Container olm-operator failed liveness probe, will be restarted" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.169754 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" containerID="cri-o://111c929bd1b9db70f524544791cc6fbb0fb391a0342bc4ef07d2d96d46ba3dab" gracePeriod=30 Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.229589 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.229659 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.229749 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.230408 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.230485 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.230545 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491336 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="Get 
\"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491399 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491420 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491454 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491485 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491493 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491508 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491523 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491472 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491542 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc 
kubenswrapper[4689]: I0123 12:13:36.491595 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491384 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5b54798db9-jfwb5" podUID="ab0d641a-3762-404a-baff-e2026b4a3896" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.212:8080/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491651 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491790 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491577 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5b54798db9-jfwb5" podUID="ab0d641a-3762-404a-baff-e2026b4a3896" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.212:8080/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491924 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.491955 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.492514 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.493610 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.761436 4689 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.777100 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" 
output="command timed out" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.781445 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.781512 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.782561 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb"} pod="openshift-marketplace/certified-operators-x87gr" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.782595 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" containerID="cri-o://ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" gracePeriod=30 Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.817443 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.817549 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.817576 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.817858 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.817884 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.946766 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" 
podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.947161 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.958744 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="packageserver" containerStatusID={"Type":"cri-o","ID":"85d0d6abce5c7c60763b91d3ede66b06784e5c65356656df7bc6be0d6f9db9a7"} pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" containerMessage="Container packageserver failed liveness probe, will be restarted" Jan 23 12:13:36 crc kubenswrapper[4689]: I0123 12:13:36.958822 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" containerID="cri-o://85d0d6abce5c7c60763b91d3ede66b06784e5c65356656df7bc6be0d6f9db9a7" gracePeriod=30 Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:36.960956 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"a7012d420ee0f260434af4fb872249a22e20f5d91f87e6793846ecefb4818c06"} pod="openshift-ingress/router-default-5444994796-qvgvs" containerMessage="Container router failed liveness probe, will be restarted" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:36.961091 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" containerID="cri-o://a7012d420ee0f260434af4fb872249a22e20f5d91f87e6793846ecefb4818c06" gracePeriod=10 Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:36.965322 4689 trace.go:236] Trace[774021653]: "Calculate volume metrics of ovndbcluster-nb-etc-ovn for pod openstack/ovsdbserver-nb-0" (23-Jan-2026 12:13:23.402) (total time: 13563ms): Jan 23 12:13:38 crc kubenswrapper[4689]: Trace[774021653]: [13.563169182s] [13.563169182s] END Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:36.966831 4689 trace.go:236] Trace[2023636350]: "Calculate volume metrics of glance for pod openstack/glance-default-external-api-0" (23-Jan-2026 12:13:17.511) (total time: 19455ms): Jan 23 12:13:38 crc kubenswrapper[4689]: Trace[2023636350]: [19.455124671s] [19.455124671s] END Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:36.967139 4689 trace.go:236] Trace[1915063334]: "Calculate volume metrics of mysql-db for pod openstack/openstack-cell1-galera-0" (23-Jan-2026 12:13:22.372) (total time: 14594ms): Jan 23 12:13:38 crc kubenswrapper[4689]: Trace[1915063334]: [14.594480142s] [14.594480142s] END Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.049219 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="package-server-manager" containerStatusID={"Type":"cri-o","ID":"99e38766149d092ea1445a9f449e7740604ee04a96eac6479ea7d55dedd7919b"} pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" 
containerMessage="Container package-server-manager failed liveness probe, will be restarted" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.049274 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" containerID="cri-o://99e38766149d092ea1445a9f449e7740604ee04a96eac6479ea7d55dedd7919b" gracePeriod=30 Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.088341 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242398 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242451 4689 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-s7k65 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242474 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242542 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242547 4689 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-s7k65 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242618 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" podUID="386d7669-fab2-42b9-ac43-767d9ae837b8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242544 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-s7k65" 
podUID="386d7669-fab2-42b9-ac43-767d9ae837b8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.242560 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.457232 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.457661 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.489182 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.489229 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.489271 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.490232 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="oauth-openshift" containerStatusID={"Type":"cri-o","ID":"ffd3ae9e6aa8933d9d68a18735ff9e6b4b4b12a171b4e366fa7a22da630b6545"} pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" containerMessage="Container oauth-openshift failed liveness probe, will be restarted" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.490514 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.490592 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" 
probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.490731 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.534561 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.534609 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.776379 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.780577 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:37.781646 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.407517 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.407590 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.412619 4689 generic.go:334] "Generic (PLEG): container finished" podID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerID="f6e7cb295635c37786202069e1e22490fee04e4ae8622761ad1ce69cf5ad6799" exitCode=137 Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.418620 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.418661 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.418687 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.418700 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tqgjs" event={"ID":"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf","Type":"ContainerDied","Data":"f6e7cb295635c37786202069e1e22490fee04e4ae8622761ad1ce69cf5ad6799"} Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.420624 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892"} pod="openshift-marketplace/redhat-marketplace-h6w2m" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.420697 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" containerID="cri-o://6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" gracePeriod=30 Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.473560 4689 patch_prober.go:28] interesting pod/console-5859dc97b8-47f8f container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.136:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.473698 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.136:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.473851 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.661138 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa9894a6_c179_4b45_a036_b94c23125162.slice/crio-d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555.scope\": RecentStats: unable to find data in memory cache]" Jan 23 12:13:38 crc kubenswrapper[4689]: I0123 12:13:38.782054 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.795832 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.797295 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" 
cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.798511 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.798550 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.799758 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.801425 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.803489 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:38 crc kubenswrapper[4689]: E0123 12:13:38.803547 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" Jan 23 12:13:39 crc kubenswrapper[4689]: E0123 12:13:39.032327 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:39 crc kubenswrapper[4689]: E0123 12:13:39.032695 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:39 crc kubenswrapper[4689]: E0123 12:13:39.033300 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
Jan 23 12:13:39 crc kubenswrapper[4689]: E0123 12:13:39.033347 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.049373 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.049789 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.202350 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.202522 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.293485 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.425769 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_43e3f941-da86-4f2a-80ea-24d29e55acb3/ovn-northd/0.log"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.426049 4689 generic.go:334] "Generic (PLEG): container finished" podID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" exitCode=139
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.426101 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"43e3f941-da86-4f2a-80ea-24d29e55acb3","Type":"ContainerDied","Data":"0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4"}
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.428330 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" podUID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.428479 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
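The exitCode values in the PLEG "container finished" entries follow the shell convention 128+signal: the speaker container's exitCode=137 is 128+9 (SIGKILL, typical of a kill after the grace period), and ovn-northd's exitCode=139 is 128+11 (SIGSEGV, a crash). A small Go sketch of the decoding (illustrative):

    // exitcode_sketch.go - decode 128+signal exit codes like those above.
    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	for _, code := range []int{137, 139} {
    		if code > 128 {
    			sig := syscall.Signal(code - 128)
    			// syscall.Signal implements Stringer, e.g. "killed", "segmentation fault".
    			fmt.Printf("exitCode=%d => signal %d (%s)\n", code, int(sig), sig)
    		}
    	}
    }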
podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.428662 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.428889 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.429248 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.469338 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" podUID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.511382 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" podUID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.511833 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.512011 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.525242 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.525298 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:39 crc kubenswrapper[4689]: 
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.641229 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": context deadline exceeded"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.725402 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.725711 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.736471 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.736534 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.777405 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z5sn9" podUID="90c7af03-d2b6-45ef-b228-d5621bf1f671" containerName="nmstate-handler" probeResult="failure" output="command timed out"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.792618 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" podUID="d36ac685-507d-4cfa-b6fe-7f595536c32f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.792748 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch"
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.961382 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.17:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.961452 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
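Note that two timeout spellings appear in these entries: most read "context deadline exceeded (Client.Timeout exceeded while awaiting headers)", while the loki-querier failures are a bare "context deadline exceeded". The annotated form comes from http.Client.Timeout; the bare form from a deadline on the request's own context. A minimal Go sketch of both variants, assuming 1-second limits and using a URL from the entries above purely for illustration:

    // timeout_variants_sketch.go - minimal sketch, not kubelet code.
    package main

    import (
    	"context"
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	// Variant 1: client-level timeout annotates the error with Client.Timeout.
    	c := &http.Client{Timeout: time.Second}
    	_, err := c.Get("https://10.217.0.53:3101/ready")
    	fmt.Println(err)

    	// Variant 2: a context deadline yields the bare "context deadline exceeded".
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()
    	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://10.217.0.53:3101/ready", nil)
    	_, err = http.DefaultClient.Do(req)
    	fmt.Println(err)
    }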
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.961396 4689 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-7qpjs container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:39 crc kubenswrapper[4689]: I0123 12:13:39.961510 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" podUID="a13e2123-3780-4c13-b8a4-760d31e5636e" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.17:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.002506 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" podUID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.002615 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="db6cec49-030a-4282-a9a4-890f2783c0e5" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.11:8081/readyz\": dial tcp 10.217.1.11:8081: connect: connection refused"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.002717 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" podUID="6087eb3b-66c0-4d14-a5de-008f086a59ee" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": dial tcp 10.217.0.94:7472: connect: connection refused"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.090167 4689 patch_prober.go:28] interesting pod/etcd-crc container/etcd namespace/openshift-etcd: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=failed to establish etcd client: giving up getting a cached client after 3 tries
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.090259 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-etcd/etcd-crc" podUID="2139d3e2895fc6797b9c76a1b4c9886d" containerName="etcd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.090691 4689 patch_prober.go:28] interesting pod/etcd-crc container/etcd namespace/openshift-etcd: Liveness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=failed to establish etcd client: giving up getting a cached client after 3 tries
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.090768 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-etcd/etcd-crc" podUID="2139d3e2895fc6797b9c76a1b4c9886d" containerName="etcd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.109375 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" podUID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
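The etcd-crc entries show a third failure shape: the probe connected and got a response, but a non-2xx status, logged as "HTTP probe failed with statuscode: 503"; the response body ("failed to establish etcd client: ...") is what appears as start-of-body. A minimal Go sketch of the server side of such a probe, assuming an illustrative handler and port (not etcd code):

    // readyz_503_sketch.go - minimal sketch of a readiness endpoint that
    // answers 503 with an explanatory body, as in the etcd-crc entries above.
    package main

    import "net/http"

    func main() {
    	http.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
    		ready := false // e.g. no cached etcd client yet
    		if !ready {
    			// The body becomes the probe's "start-of-body" in the kubelet log.
    			http.Error(w, "failed to establish etcd client: giving up getting a cached client after 3 tries",
    				http.StatusServiceUnavailable)
    			return
    		}
    		w.Write([]byte("ok"))
    	})
    	http.ListenAndServe(":9980", nil) // port illustrative
    }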
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.109364 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.109543 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.109671 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.249317 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.249378 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.249712 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8"
Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.312754 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.314019 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.314991 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.315026 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.347395 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="Get \"http://10.217.0.35:9898/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.347513 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.349118 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="hostpath-provisioner" containerStatusID={"Type":"cri-o","ID":"3eadc0f88cda2375616ec2db9c2d200b9461f716dcd8caaf83f49b0f88e38114"} pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" containerMessage="Container hostpath-provisioner failed liveness probe, will be restarted"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.349372 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" containerID="cri-o://3eadc0f88cda2375616ec2db9c2d200b9461f716dcd8caaf83f49b0f88e38114" gracePeriod=30
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.388387 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" podUID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.388506 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.388531 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.463754 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" podUID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.522231 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
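The csi-hostpathplugin sequence above is the full liveness-restart pipeline: probe failure, "SyncLoop (probe)" marking the container unhealthy, a container message, then "Killing container with a grace period" with gracePeriod=30. A minimal sketch of the kind of pod spec that drives this behavior, assuming the public k8s.io/api types and illustrative field values matching the log (not the actual manifest):

    // probe_spec_sketch.go - illustrative liveness probe declaration.
    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	grace := int64(30) // matches gracePeriod=30 in the log
    	pod := corev1.PodSpec{
    		TerminationGracePeriodSeconds: &grace,
    		Containers: []corev1.Container{{
    			Name: "hostpath-provisioner",
    			LivenessProbe: &corev1.Probe{
    				ProbeHandler: corev1.ProbeHandler{
    					HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(9898)},
    				},
    				TimeoutSeconds:   1,  // short timeouts explain the deadline errors
    				PeriodSeconds:    10, // probe cadence
    				FailureThreshold: 3,  // restart only after repeated failures
    			},
    		}},
    	}
    	fmt.Println(pod.Containers[0].LivenessProbe)
    }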
headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.522292 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.522317 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.522382 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.524803 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.524826 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.524904 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.524987 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.525011 4689 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-vllhz container/loki-distributor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.52:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.525050 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" podUID="72abaa76-42ea-4987-8f23-f4aba4f669e2" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc 
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.627384 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.627590 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" podUID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.641420 4689 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rgsmc container/loki-querier namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.641513 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" podUID="8297556c-bbae-4eb0-b3da-b09a005c90f6" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.662024 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.662100 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.668332 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" podUID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.668337 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-pw8pk" podUID="7496161e-1841-4a0e-ac40-e157bbfd9520" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.668423 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.696629 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-hc5js_fa9894a6-c179-4b45-a036-b94c23125162/download-server/0.log" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.696710 4689 generic.go:334] "Generic (PLEG): container finished" podID="fa9894a6-c179-4b45-a036-b94c23125162" containerID="d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555" exitCode=137 Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.696879 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerDied","Data":"d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555"} Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.701842 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" event={"ID":"f9dd4a75-d3a3-42d6-a305-43e95e450611","Type":"ContainerStarted","Data":"cb23c6496dd6310407e6caaa9f2251a7f2f59103e0a7ad2c8b76d76192dcda28"} Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.702307 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.702715 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.702774 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.706436 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" event={"ID":"6087eb3b-66c0-4d14-a5de-008f086a59ee","Type":"ContainerStarted","Data":"27bb729d88bdd8e56ec48676cdfc7aaa80a861b6945866970e38f655dc0cdf3e"} Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.706617 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.720109 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": dial tcp 127.0.0.1:7572: connect: connection refused" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.724225 4689 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-6sm7h container/loki-query-frontend namespace/openshift-logging: Liveness probe status=failure output="Get 
\"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.724305 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" podUID="faf0752a-d119-41d3-913f-6377a601e8ca" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.726411 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.726487 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.731505 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" podUID="3e9ac503-1ccb-4008-866b-0e6e5a11227d" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": dial tcp 10.217.0.95:7572: connect: connection refused" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.781216 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.781483 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-ovs-kfl9p" podUID="f125458c-8822-4c87-a559-adf4f9387166" containerName="ovs-vswitchd" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.783362 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-s4nwf" podUID="6c69c7bf-0e75-4bed-a212-2b7746d5ef88" containerName="ovn-controller" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.784562 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-ovs-kfl9p" podUID="f125458c-8822-4c87-a559-adf4f9387166" containerName="ovsdb-server" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.785219 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-ovs-kfl9p" podUID="f125458c-8822-4c87-a559-adf4f9387166" containerName="ovs-vswitchd" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.786106 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-ovs-kfl9p" podUID="f125458c-8822-4c87-a559-adf4f9387166" containerName="ovsdb-server" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.791910 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe 
status=failure output="Get \"https://10.217.0.80:3101/ready\": context deadline exceeded" start-of-body= Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.793708 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": context deadline exceeded" Jan 23 12:13:40 crc kubenswrapper[4689]: I0123 12:13:40.794543 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.795962 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.797997 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.800174 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.800188 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.800262 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.801940 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.803449 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = 
Jan 23 12:13:40 crc kubenswrapper[4689]: E0123 12:13:40.803502 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus"
Jan 23 12:13:41 crc kubenswrapper[4689]: I0123 12:13:41.054332 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-api-84b55874c-d6r8c" podUID="a256dd27-3435-4bcb-9ca0-46a0d472325b" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.1.21:8004/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.152348 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" podUID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.165338 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-84b55874c-d6r8c" podUID="a256dd27-3435-4bcb-9ca0-46a0d472325b" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.1.21:8004/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.165335 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-cfnapi-79f99b898-dt7zp" podUID="04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.1.22:8000/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.165305 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-79f99b898-dt7zp" podUID="04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.1.22:8000/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181302 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181344 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181356 4689 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-pjr88 container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
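The prometheus-k8s-0 readiness probe above is the sh -c curl/wget fallback against http://localhost:9090/-/ready: curl --fail (or wget) exits non-zero on a non-2xx status, which marks the exec probe failed. A minimal Go rendering of the same check for illustration (assumed equivalent behavior, not the actual probe script):

    // ready_fallback_sketch.go - native-Go sketch of the /-/ready exec probe.
    package main

    import (
    	"fmt"
    	"net/http"
    	"os"
    	"time"
    )

    func main() {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get("http://localhost:9090/-/ready")
    	if err != nil {
    		fmt.Println("not ready:", err)
    		os.Exit(1) // non-zero exit marks the exec probe as failed
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode > 299 {
    		fmt.Println("not ready: status", resp.StatusCode) // curl --fail behaves the same
    		os.Exit(1)
    	}
    	fmt.Println("ready")
    }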
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181384 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.62:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181412 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181437 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.181989 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry" containerStatusID={"Type":"cri-o","ID":"124fdd031e1adbb915bd981f1d70c8b6fbe7b04b210766c7de8d99eaa4f11897"} pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" containerMessage="Container registry failed liveness probe, will be restarted"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.291374 4689 patch_prober.go:28] interesting pod/monitoring-plugin-78f56cd898-f5fg2 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.291411 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" podUID="53597531-35c9-4478-95cc-690c554f04d0" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.99:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.291454 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" podUID="75df6af8-26fb-433c-99c0-da4b88e4796d" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.77:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.390984 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-6968d8fdc4-mtbb4" podUID="4ab08845-476b-4601-9385-bbec37b18e35" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": dial tcp 10.217.0.96:29150: connect: connection refused"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.521984 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.522036 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.522068 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-m77fj container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.55:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.522139 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-m77fj" podUID="ee6fa0a6-5ac3-4202-9280-8babe4cb29a0" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.525448 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.57:8081/live\": context deadline exceeded" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.525516 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/live\": context deadline exceeded"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.525579 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.57:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.525672 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.662208 4689 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.58:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.662281 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-ingester-0" podUID="cb56bf0f-badb-490a-be0a-2ef41c9a2459" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.58:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.710339 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" podUID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
podUID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.93:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.726175 4689 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.726233 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-compactor-0" podUID="79a8bb59-41ce-4777-90af-ded6dfe2e080" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.760357 4689 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-pkdqh" podUID="2cd07ec1-86a5-45f4-b5a6-edaa4f185c17" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.790578 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.80:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.790690 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.859920 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Liveness probe status=failure output="Get \"http://10.217.0.49:8081/healthz\": dial tcp 10.217.0.49:8081: connect: connection refused" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.859978 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/healthz\": dial tcp 10.217.0.49:8081: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.859916 4689 patch_prober.go:28] interesting pod/loki-operator-controller-manager-775d8c8b9c-rkqj2 container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.49:8081/readyz\": dial tcp 10.217.0.49:8081: connect: connection refused" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.860028 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" podUID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" 
containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8081/readyz\": dial tcp 10.217.0.49:8081: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.944449 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:41.944907 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.163361 4689 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-2r5kt container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.163393 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.163419 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" podUID="83cfd8ec-2928-4cd8-a14c-330cce17bfd5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.163469 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" podUID="4922b965-fa40-47b5-b388-e63767b62a97" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.220420 4689 patch_prober.go:28] interesting pod/network-check-target-xd92c container/network-check-target-container namespace/openshift-network-diagnostics: Readiness probe status=failure output="Get \"http://10.217.0.4:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.220510 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" containerName="network-check-target-container" probeResult="failure" output="Get \"http://10.217.0.4:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.276505 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-tqgjs" 
podUID="00b72a13-b4c0-43b7-97b9-1e9a1ec55edf" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": dial tcp [::1]:29150: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.461940 4689 patch_prober.go:28] interesting pod/thanos-querier-85b45566f7-bqf9r container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.462077 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-85b45566f7-bqf9r" podUID="9875c931-d946-41e1-8f23-89946abc0978" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.74:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.462479 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.462543 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.743286 4689 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.743341 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.774339 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.774415 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.892647 4689 generic.go:334] "Generic (PLEG): container finished" podID="6cbb7c9e-32cf-4368-8983-96d4006dcd58" containerID="8ffcdc1a70527eced45a0ee10a6a530fcfcaa83963d63c4dae3652dbc9274813" 
exitCode=1 Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.898388 4689 generic.go:334] "Generic (PLEG): container finished" podID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerID="a395e68b08186aa0babb4b1e0bcaf50412e72b1a503e7f53fee931e31e9a89e5" exitCode=0 Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.920947 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console-operator_console-operator-58897d9998-zhh2t_de449087-a423-43c1-9295-91572c72bedd/console-operator/0.log" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.921009 4689 generic.go:334] "Generic (PLEG): container finished" podID="de449087-a423-43c1-9295-91572c72bedd" containerID="e3da4255118290d8cb3a7be69771468a1e1d0e106c873e2be77ab1cd10e1d1b9" exitCode=1 Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.921746 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.921794 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.922113 4689 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.922536 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.638137517s: [/var/lib/containers/storage/overlay/6379f0a62bedcba1b2e5adf4207a0cc6a7a041bd0898db7028bdcf1168e34c5d/diff /var/log/pods/openstack_heat-engine-7db46dddd6-txhvk_222ae852-00ad-449b-a92b-b0f52d2b856f/heat-engine/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.923986 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.689464291s: [/var/lib/containers/storage/overlay/254e819449aa215882a9002ffd41ba183fe5b4c69ef741fe0fcaba73a3bfffad/diff /var/log/pods/openstack_keystone-777b6f5fc9-72drb_036797f0-f940-4ef8-9b43-cc12843d2338/keystone-api/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.925992 4689 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.287s" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.926399 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" event={"ID":"6cbb7c9e-32cf-4368-8983-96d4006dcd58","Type":"ContainerDied","Data":"8ffcdc1a70527eced45a0ee10a6a530fcfcaa83963d63c4dae3652dbc9274813"} Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.926426 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.926480 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.926493 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" event={"ID":"e7dd97d0-dc97-4bfa-9048-a065d03576ad","Type":"ContainerDied","Data":"a395e68b08186aa0babb4b1e0bcaf50412e72b1a503e7f53fee931e31e9a89e5"} Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.926509 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" event={"ID":"de449087-a423-43c1-9295-91572c72bedd","Type":"ContainerDied","Data":"e3da4255118290d8cb3a7be69771468a1e1d0e106c873e2be77ab1cd10e1d1b9"} Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.928165 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="prometheus" containerStatusID={"Type":"cri-o","ID":"ca718988dcb3cd7101ef11ad17e6b71775bccaa96d02175dba36c85f9a2cc7f7"} pod="openstack/prometheus-metric-storage-0" containerMessage="Container prometheus failed liveness probe, will be restarted" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.928256 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" containerID="cri-o://ca718988dcb3cd7101ef11ad17e6b71775bccaa96d02175dba36c85f9a2cc7f7" gracePeriod=600 Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.928989 4689 scope.go:117] "RemoveContainer" containerID="8ffcdc1a70527eced45a0ee10a6a530fcfcaa83963d63c4dae3652dbc9274813" Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.932175 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.935946 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": unexpected EOF" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.935993 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": unexpected EOF" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.938563 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": unexpected EOF" start-of-body= Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.938604 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" 
containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": unexpected EOF" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.938649 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.940431 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed liveness probe, will be restarted" Jan 23 12:13:42 crc kubenswrapper[4689]: I0123 12:13:42.940564 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c" gracePeriod=30 Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.946315 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:42 crc kubenswrapper[4689]: E0123 12:13:42.946384 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.006432 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.057868 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.057934 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.089496 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.089761 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.089806 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:43 crc kubenswrapper[4689]: E0123 12:13:43.104378 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c.scope\": RecentStats: unable to find data in memory cache]" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.781064 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="sbdb" probeResult="failure" output="command timed out" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.781406 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output="command timed out" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.781064 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" podUID="6fb5dbdd-e317-40a3-8d3c-d50cc6a66dce" containerName="nbdb" probeResult="failure" output="command timed out" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.781486 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.781535 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 12:13:43 crc kubenswrapper[4689]: E0123 12:13:43.785139 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:43 crc kubenswrapper[4689]: E0123 12:13:43.786663 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:43 crc kubenswrapper[4689]: E0123 12:13:43.788074 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:43 crc kubenswrapper[4689]: E0123 12:13:43.788106 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" 
podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.872524 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.896881 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.961351 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtbb4" event={"ID":"4ab08845-476b-4601-9385-bbec37b18e35","Type":"ContainerStarted","Data":"45b5a7bbc13b130a2a44f5eaa19df38f91181d58a843eced4bb83510230a7bc3"} Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.961926 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-mtbb4" Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.964823 4689 generic.go:334] "Generic (PLEG): container finished" podID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerID="9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef" exitCode=0 Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.964892 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef"} Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.972380 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pkdqh" event={"ID":"2cd07ec1-86a5-45f4-b5a6-edaa4f185c17","Type":"ContainerStarted","Data":"402bee0af31f5ed7665c8eb8676607c29e7e5be63656bd6e32cbb2a9f52cac72"} Jan 23 12:13:43 crc kubenswrapper[4689]: I0123 12:13:43.972844 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.007667 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.023261 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.031335 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.031407 4689 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c" exitCode=1 Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.031449 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"4f3cf7caf60d56d4b30a07807d8746b90cb2691b010108b3bbea27a26f318a9c"} Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.031482 4689 scope.go:117] "RemoveContainer" 
containerID="f48044d8c44a7e1f5f60ba3b21fcd4af54aed4f6696c19ebcd00f78ff99adc34" Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.032040 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.032681 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.033108 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.033187 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.319489 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.319745 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/livez?exclude=etcd\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.319787 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.320625 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="oauth-apiserver" containerStatusID={"Type":"cri-o","ID":"ae53b5bf555f0f34d69a4028ac38c5e675df395b21c5bd362fa18c339f207ede"} pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" containerMessage="Container oauth-apiserver failed liveness probe, will be restarted" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.320661 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" 
containerName="oauth-apiserver" containerID="cri-o://ae53b5bf555f0f34d69a4028ac38c5e675df395b21c5bd362fa18c339f207ede" gracePeriod=120 Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.417860 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.417923 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.472651 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.472706 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.632777 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.632842 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.633021 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.640831 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.641369 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.696370 4689 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:6443/livez?exclude=etcd\": net/http: request canceled while waiting for connection 
(Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.696423 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez?exclude=etcd\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.751618 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": dial tcp 10.217.0.115:8081: connect: connection refused" Jan 23 12:13:44 crc kubenswrapper[4689]: I0123 12:13:44.751758 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" podUID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": dial tcp 10.217.0.115:8081: connect: connection refused" Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.774524 4689 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:44 crc kubenswrapper[4689]: E0123 12:13:44.810615 4689 event.go:359] "Server rejected event (will not retry!)" err="Internal error occurred: admission plugin \"MutatingAdmissionWebhook\" failed to complete mutation in 13s" event=< Jan 23 12:13:44 crc kubenswrapper[4689]: &Event{ObjectMeta:{loki-operator-controller-manager-775d8c8b9c-rkqj2.188d5b1958bc1cee openshift-operators-redhat 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-operators-redhat,Name:loki-operator-controller-manager-775d8c8b9c-rkqj2,UID:6cbb7c9e-32cf-4368-8983-96d4006dcd58,APIVersion:v1,ResourceVersion:34211,FieldPath:spec.containers{manager},},Reason:ProbeError,Message:Readiness probe error: Get "http://10.217.0.49:8081/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Jan 23 12:13:44 crc kubenswrapper[4689]: body: Jan 23 12:13:44 crc kubenswrapper[4689]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 12:13:13.047506158 +0000 UTC m=+5057.672186017,LastTimestamp:2026-01-23 12:13:13.047506158 +0000 UTC m=+5057.672186017,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 23 12:13:44 crc kubenswrapper[4689]: > Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:44.997822 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.043363 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" event={"ID":"e7dd97d0-dc97-4bfa-9048-a065d03576ad","Type":"ContainerStarted","Data":"b8d434796672ed63c6d0dbd5784d16d4b0940cb9250dd1b7593e281f52025547"} Jan 23 12:13:45 crc 
kubenswrapper[4689]: I0123 12:13:45.045309 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.045676 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.045731 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.047844 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" event={"ID":"3e9ac503-1ccb-4008-866b-0e6e5a11227d","Type":"ContainerStarted","Data":"8afe45bf0e59e127a89ab9d41346a13f9329bd4968e015e4658be061bdc638e5"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.047933 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.050186 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_43e3f941-da86-4f2a-80ea-24d29e55acb3/ovn-northd/0.log" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.050288 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"43e3f941-da86-4f2a-80ea-24d29e55acb3","Type":"ContainerStarted","Data":"4fad7d8ce78587fe10c800c31eee0a884b9b1c95e33fd22b4e54a2aa3cb195e8"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.051878 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.054734 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tqgjs" event={"ID":"00b72a13-b4c0-43b7-97b9-1e9a1ec55edf","Type":"ContainerStarted","Data":"d09607354ee7ddfbaf58a1915ad181ba95100fdf9896c1b882bd45a2523c05d3"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.054941 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tqgjs" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.056604 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" event={"ID":"6cbb7c9e-32cf-4368-8983-96d4006dcd58","Type":"ContainerStarted","Data":"24e9a132da8f0396b3fe43ad641387517b800dadcb1f8896e78a57827c814679"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.056779 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.058818 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-console-operator_console-operator-58897d9998-zhh2t_de449087-a423-43c1-9295-91572c72bedd/console-operator/0.log" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.058898 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" event={"ID":"de449087-a423-43c1-9295-91572c72bedd","Type":"ContainerStarted","Data":"d1a74a91d59e64ff28da40b7c35e7f3f276cfb93d7ef5e8a6e0039347da6b6c4"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.059091 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.059469 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.059530 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.060983 4689 generic.go:334] "Generic (PLEG): container finished" podID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerID="85d0d6abce5c7c60763b91d3ede66b06784e5c65356656df7bc6be0d6f9db9a7" exitCode=0 Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.061038 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" event={"ID":"6105379b-1fb8-4384-b6d5-67b4db5498e5","Type":"ContainerDied","Data":"85d0d6abce5c7c60763b91d3ede66b06784e5c65356656df7bc6be0d6f9db9a7"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.062789 4689 generic.go:334] "Generic (PLEG): container finished" podID="99f43d3e-dce3-4f53-90a5-76793663baaf" containerID="58352f3c3146b53b308d350cabbf3a9d19111830b2b8b294e1ce099e2323ea85" exitCode=1 Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.062885 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" event={"ID":"99f43d3e-dce3-4f53-90a5-76793663baaf","Type":"ContainerDied","Data":"58352f3c3146b53b308d350cabbf3a9d19111830b2b8b294e1ce099e2323ea85"} Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.167867 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.167934 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.229043 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver 
namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.229107 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.247031 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.247082 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.249175 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]backend-http ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]has-synced ok Jan 23 12:13:45 crc kubenswrapper[4689]: [-]process-running failed: reason withheld Jan 23 12:13:45 crc kubenswrapper[4689]: healthz check failed Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.249206 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.258195 4689 scope.go:117] "RemoveContainer" containerID="58352f3c3146b53b308d350cabbf3a9d19111830b2b8b294e1ce099e2323ea85" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.299038 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.318465 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]log ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok 
Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:13:45 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:13:45 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:13:45 crc kubenswrapper[4689]: readyz check failed Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.318556 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.321704 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fjpqb" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.458966 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.459264 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.492009 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.492092 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.495279 4689 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xvxpp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.495328 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" podUID="f9dd4a75-d3a3-42d6-a305-43e95e450611" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.547659 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9 is running failed: container process not found" 
containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.548026 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9 is running failed: container process not found" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.548426 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9 is running failed: container process not found" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.548462 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9 is running failed: container process not found" probeType="Readiness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.631513 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.631598 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.631682 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.761738 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pkdqh" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.779515 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output="command timed out" Jan 23 12:13:45 crc kubenswrapper[4689]: I0123 12:13:45.781841 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.802981 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" 
cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.804430 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.805643 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:45 crc kubenswrapper[4689]: E0123 12:13:45.805681 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.076041 4689 generic.go:334] "Generic (PLEG): container finished" podID="54403d19-67da-4783-8b45-b7070bc15424" containerID="4c9d3b565f75fab4b3494dcdff98af5885c61dcab588c19ad536c3b08a941da3" exitCode=1 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.076134 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" event={"ID":"54403d19-67da-4783-8b45-b7070bc15424","Type":"ContainerDied","Data":"4c9d3b565f75fab4b3494dcdff98af5885c61dcab588c19ad536c3b08a941da3"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.077396 4689 scope.go:117] "RemoveContainer" containerID="4c9d3b565f75fab4b3494dcdff98af5885c61dcab588c19ad536c3b08a941da3" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.078605 4689 generic.go:334] "Generic (PLEG): container finished" podID="7ad0b754-e721-4b19-b0b6-a7e1200a48d4" containerID="79c19042317d1fe97b74a17bab64b7bf7a976590c8d5ae7134d35b51c560cf55" exitCode=1 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.078668 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" event={"ID":"7ad0b754-e721-4b19-b0b6-a7e1200a48d4","Type":"ContainerDied","Data":"79c19042317d1fe97b74a17bab64b7bf7a976590c8d5ae7134d35b51c560cf55"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.079720 4689 scope.go:117] "RemoveContainer" containerID="79c19042317d1fe97b74a17bab64b7bf7a976590c8d5ae7134d35b51c560cf55" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.080778 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 12:13:46 crc 
kubenswrapper[4689]: I0123 12:13:46.081897 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"be0e2dca32747b5dcb42226d8ada77ef27352482567bf27fd95f7f84f459c987"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.082316 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed liveness probe, will be restarted" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.082396 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" containerID="cri-o://8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee" gracePeriod=30 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.084950 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db6cec49-030a-4282-a9a4-890f2783c0e5","Type":"ContainerStarted","Data":"3e510b3bb1139dc2515ccb561e8ebcd921c2a948128072dc2516224e464bcc7c"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.085068 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.087731 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" event={"ID":"99f43d3e-dce3-4f53-90a5-76793663baaf","Type":"ContainerStarted","Data":"5e8fd4433fa6536171e6f64fc7a2b498f13048dbf62d9dca76d9785247b5342f"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.088313 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.090330 4689 generic.go:334] "Generic (PLEG): container finished" podID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerID="939f3a2aac68fd7213a2bb984c9b0345accceb9fbf27f1040d0d8f58c0057bcc" exitCode=1 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.090377 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" event={"ID":"8359ad74-2a40-4f5f-afe6-880a3f0a990e","Type":"ContainerDied","Data":"939f3a2aac68fd7213a2bb984c9b0345accceb9fbf27f1040d0d8f58c0057bcc"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.091614 4689 scope.go:117] "RemoveContainer" containerID="939f3a2aac68fd7213a2bb984c9b0345accceb9fbf27f1040d0d8f58c0057bcc" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.107282 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" event={"ID":"6105379b-1fb8-4384-b6d5-67b4db5498e5","Type":"ContainerStarted","Data":"fada9f2c82f2ca309cac24ab85c9a13b24a2bba2ad496147d4966cddcfdbca69"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.108809 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 12:13:46 crc 
kubenswrapper[4689]: I0123 12:13:46.110127 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.110740 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.128869 4689 generic.go:334] "Generic (PLEG): container finished" podID="28a286e0-4072-40b0-aa95-4a12299f5a72" containerID="ae66ba54a8d8aa8d294822d67c78a0cfeeae3a8339ea4505ec06bff6858dc64d" exitCode=1 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.128975 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" event={"ID":"28a286e0-4072-40b0-aa95-4a12299f5a72","Type":"ContainerDied","Data":"ae66ba54a8d8aa8d294822d67c78a0cfeeae3a8339ea4505ec06bff6858dc64d"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.130682 4689 scope.go:117] "RemoveContainer" containerID="ae66ba54a8d8aa8d294822d67c78a0cfeeae3a8339ea4505ec06bff6858dc64d" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.139472 4689 generic.go:334] "Generic (PLEG): container finished" podID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerID="4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9" exitCode=0 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.139627 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d9bfx" event={"ID":"d92e2c5f-df9d-44e5-839c-806799a650a4","Type":"ContainerDied","Data":"4dcbbbf014e89fea21263c6a1f044318bbb9d2f4598aebf53b6dddcaf2d147a9"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.148017 4689 generic.go:334] "Generic (PLEG): container finished" podID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" exitCode=0 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.148096 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerDied","Data":"6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.151276 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-hc5js_fa9894a6-c179-4b45-a036-b94c23125162/download-server/0.log" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.151392 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerStarted","Data":"cdae71403f1cef069a3db6d73580a5dac2e51b2609813ab908a515617eeccc18"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.152049 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= 
Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.152094 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.152140 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.155243 4689 generic.go:334] "Generic (PLEG): container finished" podID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" exitCode=0 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.155299 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerDied","Data":"0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.156995 4689 generic.go:334] "Generic (PLEG): container finished" podID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerID="111c929bd1b9db70f524544791cc6fbb0fb391a0342bc4ef07d2d96d46ba3dab" exitCode=0 Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.157070 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" event={"ID":"3377dc50-b5b0-40d0-9b16-295713320fcd","Type":"ContainerDied","Data":"111c929bd1b9db70f524544791cc6fbb0fb391a0342bc4ef07d2d96d46ba3dab"} Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.158536 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.158577 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" Jan 23 12:13:46 crc kubenswrapper[4689]: E0123 12:13:46.158684 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.159740 4689 patch_prober.go:28] interesting pod/console-operator-58897d9998-zhh2t container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 23 12:13:46 crc kubenswrapper[4689]: E0123 12:13:46.160355 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or 
running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.160541 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" podUID="de449087-a423-43c1-9295-91572c72bedd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/readyz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 23 12:13:46 crc kubenswrapper[4689]: E0123 12:13:46.160656 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" containerID="0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 12:13:46 crc kubenswrapper[4689]: E0123 12:13:46.160689 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ae841230d31165cb9cc13ee1d864532dee6ffc4e3dccae2391931993cee2cb4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="43e3f941-da86-4f2a-80ea-24d29e55acb3" containerName="ovn-northd" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.739522 4689 trace.go:236] Trace[673773423]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-ingester-0" (23-Jan-2026 12:13:34.112) (total time: 12626ms): Jan 23 12:13:46 crc kubenswrapper[4689]: Trace[673773423]: [12.626973414s] [12.626973414s] END Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.740242 4689 trace.go:236] Trace[622126734]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-pjr88" (23-Jan-2026 12:13:43.901) (total time: 2838ms): Jan 23 12:13:46 crc kubenswrapper[4689]: Trace[622126734]: [2.838435254s] [2.838435254s] END Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.743033 4689 trace.go:236] Trace[613708053]: "Calculate volume metrics of prometheus-metric-storage-db for pod openstack/prometheus-metric-storage-0" (23-Jan-2026 12:13:39.227) (total time: 7515ms): Jan 23 12:13:46 crc kubenswrapper[4689]: Trace[613708053]: [7.515456789s] [7.515456789s] END Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.793605 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.809555 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 23 12:13:46 crc kubenswrapper[4689]: I0123 12:13:46.809611 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 
10.217.0.61:8443: connect: connection refused" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.181817 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-qvgvs_c7b42454-f36b-4ab0-86d0-a2decba67e28/router/0.log" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.191941 4689 generic.go:334] "Generic (PLEG): container finished" podID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerID="a7012d420ee0f260434af4fb872249a22e20f5d91f87e6793846ecefb4818c06" exitCode=137 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.192115 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qvgvs" event={"ID":"c7b42454-f36b-4ab0-86d0-a2decba67e28","Type":"ContainerDied","Data":"a7012d420ee0f260434af4fb872249a22e20f5d91f87e6793846ecefb4818c06"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.199737 4689 generic.go:334] "Generic (PLEG): container finished" podID="f451d39d-2f3f-4c53-b5a2-d8e7f74247f9" containerID="06d5b593341eacea193741a2238185d9478479717081e0fb3893a5364a2ea84b" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.199825 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" event={"ID":"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9","Type":"ContainerDied","Data":"06d5b593341eacea193741a2238185d9478479717081e0fb3893a5364a2ea84b"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.200688 4689 scope.go:117] "RemoveContainer" containerID="06d5b593341eacea193741a2238185d9478479717081e0fb3893a5364a2ea84b" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.206032 4689 generic.go:334] "Generic (PLEG): container finished" podID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.206093 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerDied","Data":"ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.238869 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerID="02fcdba4ae0517ab65a02ae2a217999286dcdecc9b70247c81169a00a58db698" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.238933 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" event={"ID":"3d3561eb-7369-4466-b9ee-037e02b2c219","Type":"ContainerDied","Data":"02fcdba4ae0517ab65a02ae2a217999286dcdecc9b70247c81169a00a58db698"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.239749 4689 scope.go:117] "RemoveContainer" containerID="02fcdba4ae0517ab65a02ae2a217999286dcdecc9b70247c81169a00a58db698" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.246186 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.247677 4689 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 
12:13:47.247733 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"8dd638cecdcfd3336e220f94f9b9fe8ebd3332faaa19aa75cfd22f0ff0087aee"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.251862 4689 generic.go:334] "Generic (PLEG): container finished" podID="6e48e594-66b4-4d88-823f-2ed90fa79d66" containerID="b508331b42f4214dc72bafcb3639996c2d4a5c8ee125e13d01a042b880f81f7b" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.251922 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" event={"ID":"6e48e594-66b4-4d88-823f-2ed90fa79d66","Type":"ContainerDied","Data":"b508331b42f4214dc72bafcb3639996c2d4a5c8ee125e13d01a042b880f81f7b"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.252829 4689 scope.go:117] "RemoveContainer" containerID="b508331b42f4214dc72bafcb3639996c2d4a5c8ee125e13d01a042b880f81f7b" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.254929 4689 generic.go:334] "Generic (PLEG): container finished" podID="39db2be1-cb37-4ca9-af8a-5ce0f2d1db16" containerID="065b1a67e2dc6e27c49740632892692de3ce0e27cea59be1df19c3444e332806" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.254978 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" event={"ID":"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16","Type":"ContainerDied","Data":"065b1a67e2dc6e27c49740632892692de3ce0e27cea59be1df19c3444e332806"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.255399 4689 scope.go:117] "RemoveContainer" containerID="065b1a67e2dc6e27c49740632892692de3ce0e27cea59be1df19c3444e332806" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.260962 4689 generic.go:334] "Generic (PLEG): container finished" podID="167d35d1-8eb3-492e-beb3-4325d183c7b9" containerID="2c5d14c17f4d9b1342dc30203394b0cd59e21d00892248c3f95710febffc7829" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.261074 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" event={"ID":"167d35d1-8eb3-492e-beb3-4325d183c7b9","Type":"ContainerDied","Data":"2c5d14c17f4d9b1342dc30203394b0cd59e21d00892248c3f95710febffc7829"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.261879 4689 scope.go:117] "RemoveContainer" containerID="2c5d14c17f4d9b1342dc30203394b0cd59e21d00892248c3f95710febffc7829" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.264570 4689 generic.go:334] "Generic (PLEG): container finished" podID="1f6a7f15-609b-414e-8119-366afe98811f" containerID="6ef91b3993663b9fbf14d57c88607e3ef317bce08b95696400ada06c891ce577" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.264635 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" event={"ID":"1f6a7f15-609b-414e-8119-366afe98811f","Type":"ContainerDied","Data":"6ef91b3993663b9fbf14d57c88607e3ef317bce08b95696400ada06c891ce577"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.265439 4689 scope.go:117] "RemoveContainer" containerID="6ef91b3993663b9fbf14d57c88607e3ef317bce08b95696400ada06c891ce577" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.270898 4689 generic.go:334] "Generic (PLEG): container finished" 
podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.270976 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"7f0f5ab13f16001e01ce20e7d379b04d814fb8afd24b02eb5f459bb051c37afa"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.282301 4689 generic.go:334] "Generic (PLEG): container finished" podID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerID="99e38766149d092ea1445a9f449e7740604ee04a96eac6479ea7d55dedd7919b" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.282375 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" event={"ID":"a8f4f0c7-61db-4423-8f3a-229e4ac94951","Type":"ContainerDied","Data":"99e38766149d092ea1445a9f449e7740604ee04a96eac6479ea7d55dedd7919b"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.304441 4689 generic.go:334] "Generic (PLEG): container finished" podID="4922b965-fa40-47b5-b388-e63767b62a97" containerID="49aa2056ed8d32dc8f5f9a4763e0c4b749d6c60c8ad22236def4dfb0b79215a2" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.304530 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" event={"ID":"4922b965-fa40-47b5-b388-e63767b62a97","Type":"ContainerDied","Data":"49aa2056ed8d32dc8f5f9a4763e0c4b749d6c60c8ad22236def4dfb0b79215a2"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.305363 4689 scope.go:117] "RemoveContainer" containerID="49aa2056ed8d32dc8f5f9a4763e0c4b749d6c60c8ad22236def4dfb0b79215a2" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.317639 4689 generic.go:334] "Generic (PLEG): container finished" podID="af5f2d1f-74a0-4ac2-9e78-c81c3815f722" containerID="1040e6fbc746f9335cc73efbca7f4ca3966c7a5cc50a703175484ce84927f336" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.317695 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" event={"ID":"af5f2d1f-74a0-4ac2-9e78-c81c3815f722","Type":"ContainerDied","Data":"1040e6fbc746f9335cc73efbca7f4ca3966c7a5cc50a703175484ce84927f336"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.318493 4689 scope.go:117] "RemoveContainer" containerID="1040e6fbc746f9335cc73efbca7f4ca3966c7a5cc50a703175484ce84927f336" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.333312 4689 generic.go:334] "Generic (PLEG): container finished" podID="2bd7a193-5394-452e-9315-0332e4a4e667" containerID="5a4c7dcb3b69d6fe536600b96c035f8b9e4dccea8f5d05702d0965037a3c48e7" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.333378 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" event={"ID":"2bd7a193-5394-452e-9315-0332e4a4e667","Type":"ContainerDied","Data":"5a4c7dcb3b69d6fe536600b96c035f8b9e4dccea8f5d05702d0965037a3c48e7"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.334138 4689 scope.go:117] "RemoveContainer" containerID="5a4c7dcb3b69d6fe536600b96c035f8b9e4dccea8f5d05702d0965037a3c48e7" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.336062 4689 generic.go:334] "Generic (PLEG): container finished" 
podID="53597531-35c9-4478-95cc-690c554f04d0" containerID="ef07d0ae36d941c862210e8f123a8a2796d41e0ae432c3c9b206097fa1e22fbd" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.336159 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" event={"ID":"53597531-35c9-4478-95cc-690c554f04d0","Type":"ContainerDied","Data":"ef07d0ae36d941c862210e8f123a8a2796d41e0ae432c3c9b206097fa1e22fbd"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.337237 4689 scope.go:117] "RemoveContainer" containerID="ef07d0ae36d941c862210e8f123a8a2796d41e0ae432c3c9b206097fa1e22fbd" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.340507 4689 generic.go:334] "Generic (PLEG): container finished" podID="d587cb55-dfd2-42e6-bb32-3a4202dd05c5" containerID="d817518e10ded0a20c77dbb519c27df287aa0f4b77038b9fc486142ea60aab2d" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.340579 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" event={"ID":"d587cb55-dfd2-42e6-bb32-3a4202dd05c5","Type":"ContainerDied","Data":"d817518e10ded0a20c77dbb519c27df287aa0f4b77038b9fc486142ea60aab2d"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.341449 4689 scope.go:117] "RemoveContainer" containerID="d817518e10ded0a20c77dbb519c27df287aa0f4b77038b9fc486142ea60aab2d" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.345382 4689 generic.go:334] "Generic (PLEG): container finished" podID="b5e62e31-60a7-4964-b3e7-611e7a8bfa81" containerID="bfa358ceac7caa6a34be83da1dd2aa0c4e98c78661c0e6d4351376051e9bc705" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.345502 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" event={"ID":"b5e62e31-60a7-4964-b3e7-611e7a8bfa81","Type":"ContainerDied","Data":"bfa358ceac7caa6a34be83da1dd2aa0c4e98c78661c0e6d4351376051e9bc705"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.346896 4689 scope.go:117] "RemoveContainer" containerID="bfa358ceac7caa6a34be83da1dd2aa0c4e98c78661c0e6d4351376051e9bc705" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.359751 4689 generic.go:334] "Generic (PLEG): container finished" podID="a9f05c03-72c2-4906-b327-df50d5922d28" containerID="88f55ee37f1a238a42ca14ce9589a8e9bf6e8c2e9ce65987b81dc450d922e7be" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.359830 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" event={"ID":"a9f05c03-72c2-4906-b327-df50d5922d28","Type":"ContainerDied","Data":"88f55ee37f1a238a42ca14ce9589a8e9bf6e8c2e9ce65987b81dc450d922e7be"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.360453 4689 scope.go:117] "RemoveContainer" containerID="88f55ee37f1a238a42ca14ce9589a8e9bf6e8c2e9ce65987b81dc450d922e7be" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.366825 4689 generic.go:334] "Generic (PLEG): container finished" podID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerID="454a4c74dbb6a1652d2c0e501a9c5aa9bf1591e3de67f7eb8f95d1bf4f84cab4" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.366893 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" 
event={"ID":"9c5d05df-7a1c-4c0c-b791-cc8e932d2560","Type":"ContainerDied","Data":"454a4c74dbb6a1652d2c0e501a9c5aa9bf1591e3de67f7eb8f95d1bf4f84cab4"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.373621 4689 generic.go:334] "Generic (PLEG): container finished" podID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerID="ca718988dcb3cd7101ef11ad17e6b71775bccaa96d02175dba36c85f9a2cc7f7" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.373702 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerDied","Data":"ca718988dcb3cd7101ef11ad17e6b71775bccaa96d02175dba36c85f9a2cc7f7"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.401885 4689 generic.go:334] "Generic (PLEG): container finished" podID="5f4d15d8-f941-4082-ab51-3ecda5527f9b" containerID="7639c1a58c928d80666ad2d99cd99080343a84f5acc53dc271feb3745392ffd4" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.402061 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" event={"ID":"5f4d15d8-f941-4082-ab51-3ecda5527f9b","Type":"ContainerDied","Data":"7639c1a58c928d80666ad2d99cd99080343a84f5acc53dc271feb3745392ffd4"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.402963 4689 scope.go:117] "RemoveContainer" containerID="7639c1a58c928d80666ad2d99cd99080343a84f5acc53dc271feb3745392ffd4" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.408706 4689 generic.go:334] "Generic (PLEG): container finished" podID="72fb2e87-da8d-4db1-b255-d38d7c15b5cd" containerID="373dd3741255b656c7658abc5192c58adfe9fef3da3d9e2d7aa69ce601c83b26" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.408789 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" event={"ID":"72fb2e87-da8d-4db1-b255-d38d7c15b5cd","Type":"ContainerDied","Data":"373dd3741255b656c7658abc5192c58adfe9fef3da3d9e2d7aa69ce601c83b26"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.410686 4689 scope.go:117] "RemoveContainer" containerID="373dd3741255b656c7658abc5192c58adfe9fef3da3d9e2d7aa69ce601c83b26" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.421530 4689 generic.go:334] "Generic (PLEG): container finished" podID="e69fb667-9cde-4376-b12f-2847b0142176" containerID="1310f2493d20a187538f0e0e33f0c733c42bb03aef4a7942b922c34994a6316c" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.421779 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" event={"ID":"e69fb667-9cde-4376-b12f-2847b0142176","Type":"ContainerDied","Data":"1310f2493d20a187538f0e0e33f0c733c42bb03aef4a7942b922c34994a6316c"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.430111 4689 generic.go:334] "Generic (PLEG): container finished" podID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerID="2cc243468ad8665b4783027e70c09b12e489c4842e28679a5bcbefdbef595e4b" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.430268 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" event={"ID":"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3","Type":"ContainerDied","Data":"2cc243468ad8665b4783027e70c09b12e489c4842e28679a5bcbefdbef595e4b"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.432607 4689 
generic.go:334] "Generic (PLEG): container finished" podID="d55b5d87-6f4b-4eb7-bfc7-025b936cebb9" containerID="8770209dbeff0684463277c6bc80ef4f207626e0798fda64fcead3dad54d7f4a" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.432646 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" event={"ID":"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9","Type":"ContainerDied","Data":"8770209dbeff0684463277c6bc80ef4f207626e0798fda64fcead3dad54d7f4a"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.433483 4689 scope.go:117] "RemoveContainer" containerID="8770209dbeff0684463277c6bc80ef4f207626e0798fda64fcead3dad54d7f4a" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.439339 4689 generic.go:334] "Generic (PLEG): container finished" podID="3369528a-f39f-4e47-92e9-abbca4395b98" containerID="da6f8c5470abbd67ebf7fded1b590e1d6e3441a90e12b60b5ea7f7ab81f06250" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.439401 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" event={"ID":"3369528a-f39f-4e47-92e9-abbca4395b98","Type":"ContainerDied","Data":"da6f8c5470abbd67ebf7fded1b590e1d6e3441a90e12b60b5ea7f7ab81f06250"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.440055 4689 scope.go:117] "RemoveContainer" containerID="da6f8c5470abbd67ebf7fded1b590e1d6e3441a90e12b60b5ea7f7ab81f06250" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.446420 4689 generic.go:334] "Generic (PLEG): container finished" podID="c9dc7063-1b29-40e1-b451-e9dc882e7476" containerID="a00e670a2fc7896199c14ae596ab719780e9a042a4b14427aa186d8f5865f7fa" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.446483 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" event={"ID":"c9dc7063-1b29-40e1-b451-e9dc882e7476","Type":"ContainerDied","Data":"a00e670a2fc7896199c14ae596ab719780e9a042a4b14427aa186d8f5865f7fa"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.447234 4689 scope.go:117] "RemoveContainer" containerID="a00e670a2fc7896199c14ae596ab719780e9a042a4b14427aa186d8f5865f7fa" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.449461 4689 generic.go:334] "Generic (PLEG): container finished" podID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerID="69e3263f615b7a5c939fd2604f8e31d807c6746b23f541b888f7e11cf09c7d2e" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.449503 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" event={"ID":"45cadce8-d2da-450b-9b37-c2a6b2a1c595","Type":"ContainerDied","Data":"69e3263f615b7a5c939fd2604f8e31d807c6746b23f541b888f7e11cf09c7d2e"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.450625 4689 generic.go:334] "Generic (PLEG): container finished" podID="d36ac685-507d-4cfa-b6fe-7f595536c32f" containerID="a50c724c621aeb491390ac358c12ca1c2f848851aff767863477263db4e81fb6" exitCode=1 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.450665 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" event={"ID":"d36ac685-507d-4cfa-b6fe-7f595536c32f","Type":"ContainerDied","Data":"a50c724c621aeb491390ac358c12ca1c2f848851aff767863477263db4e81fb6"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.450980 4689 scope.go:117] 
"RemoveContainer" containerID="a50c724c621aeb491390ac358c12ca1c2f848851aff767863477263db4e81fb6" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.467228 4689 generic.go:334] "Generic (PLEG): container finished" podID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.467299 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerDied","Data":"0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.470882 4689 generic.go:334] "Generic (PLEG): container finished" podID="9d058744-ff52-4a7c-8e44-86c81270e7d1" containerID="3228ffcb48d7c3d7668df8877738301ff810d2f4229fe537aba32e0352d31988" exitCode=0 Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.472849 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" event={"ID":"9d058744-ff52-4a7c-8e44-86c81270e7d1","Type":"ContainerDied","Data":"3228ffcb48d7c3d7668df8877738301ff810d2f4229fe537aba32e0352d31988"} Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.474204 4689 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-flfwq container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.474248 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" podUID="e7dd97d0-dc97-4bfa-9048-a065d03576ad" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.69:8443/healthz\": dial tcp 10.217.0.69:8443: connect: connection refused" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.474364 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.474406 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.477085 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.477145 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 
23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.503832 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.788685 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.788887 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7db46dddd6-txhvk" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.812014 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" probeResult="failure" output="command timed out" Jan 23 12:13:47 crc kubenswrapper[4689]: I0123 12:13:47.822832 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-engine-7db46dddd6-txhvk" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.006572 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.055239 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.055350 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.121442 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7db46dddd6-txhvk" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.161454 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.246652 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.246713 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.303268 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.306249 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.321629 4689 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" start-of-body= Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.321678 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": dial tcp 192.168.126.11:10357: connect: connection refused" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.329895 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 12:13:48 crc kubenswrapper[4689]: E0123 12:13:48.361809 4689 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53597531_35c9_4478_95cc_690c554f04d0.slice/crio-conmon-ef07d0ae36d941c862210e8f123a8a2796d41e0ae432c3c9b206097fa1e22fbd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7c32de2_03fb_4b12_8fdf_69161c24eed2.slice/crio-conmon-0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefa605e2_690e_48af_9218_a8826ead1e88.slice/crio-396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9f05c03_72c2_4906_b327_df50d5922d28.slice/crio-88f55ee37f1a238a42ca14ce9589a8e9bf6e8c2e9ce65987b81dc450d922e7be.scope\": RecentStats: unable to find data in memory cache]" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.401067 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.401125 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.443299 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.458519 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.458570 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.502984 4689 generic.go:334] "Generic (PLEG): container finished" podID="b1a77706-f3e5-48b9-95b8-5f13daa0d29f" containerID="df50f5f45c52f9d981f8f487a59fd2126408503d8b7b322713d9bee13c623344" exitCode=1 Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.503082 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" 
event={"ID":"b1a77706-f3e5-48b9-95b8-5f13daa0d29f","Type":"ContainerDied","Data":"df50f5f45c52f9d981f8f487a59fd2126408503d8b7b322713d9bee13c623344"} Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.504130 4689 scope.go:117] "RemoveContainer" containerID="df50f5f45c52f9d981f8f487a59fd2126408503d8b7b322713d9bee13c623344" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.528552 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.528597 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.534262 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-vllhz" Jan 23 12:13:48 crc kubenswrapper[4689]: E0123 12:13:48.556026 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620 is running failed: container process not found" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:48 crc kubenswrapper[4689]: E0123 12:13:48.559971 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620 is running failed: container process not found" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:48 crc kubenswrapper[4689]: E0123 12:13:48.560689 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620 is running failed: container process not found" containerID="0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:48 crc kubenswrapper[4689]: E0123 12:13:48.560722 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0032bf8c9f36cecf4c6b4c1c5ad1c3d6ed4c2013212be5317c0082f39c8b3620 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.577661 4689 generic.go:334] "Generic (PLEG): container finished" podID="efa605e2-690e-48af-9218-a8826ead1e88" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" exitCode=0 Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.578851 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerDied","Data":"396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18"} Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.580048 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness 
probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.580090 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.587286 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="heat-engine" containerStatusID={"Type":"cri-o","ID":"412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84"} pod="openstack/heat-engine-7db46dddd6-txhvk" containerMessage="Container heat-engine failed liveness probe, will be restarted" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.587391 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" containerID="cri-o://412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" gracePeriod=60 Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.615531 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.617195 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.650568 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-76788598db-rgsmc" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.692278 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": dial tcp 10.217.0.5:8081: connect: connection refused" start-of-body= Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.692587 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": dial tcp 10.217.0.5:8081: connect: connection refused" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.732617 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-69d9546745-6sm7h" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.750582 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.750712 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.837517 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" 
containerID="cri-o://39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8" gracePeriod=18 Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.852360 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.852415 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.889364 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.889409 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.889537 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-7qpjs" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.916173 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" containerID="cri-o://c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c" gracePeriod=15 Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.933120 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.933226 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 12:13:48 crc kubenswrapper[4689]: I0123 12:13:48.943835 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8cc0bc0c-47d7-48d8-bfba-a9694ab485a0" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.165:9090/-/ready\": dial tcp 10.217.0.165:9090: connect: connection refused" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.028136 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.051189 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.329799 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.336632 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]log ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:13:49 crc 
kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:13:49 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:13:49 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:13:49 crc kubenswrapper[4689]: readyz check failed Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.336683 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.403345 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.403410 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.422391 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.422506 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.689630 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.749012 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Jan 23 12:13:49 crc kubenswrapper[4689]: I0123 12:13:49.848176 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.319295 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988 is running failed: container process not found" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.321305 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-78f56cd898-f5fg2" Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.325960 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988 is running failed: container process not found" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc 
kubenswrapper[4689]: E0123 12:13:50.326633 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988 is running failed: container process not found" containerID="0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.326673 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0fdfeae752c965da512097fb9f3fb9bc78b5f1cec806671a9a5846064d414988 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.539073 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.540712 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.541992 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.542033 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.604399 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.640605 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" event={"ID":"f451d39d-2f3f-4c53-b5a2-d8e7f74247f9","Type":"ContainerStarted","Data":"d35d5b84378409a4270b8938f2fe55f4f66c39f4e4948e31c1174c966fa862a8"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.642067 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.648818 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" 
event={"ID":"a8f4f0c7-61db-4423-8f3a-229e4ac94951","Type":"ContainerStarted","Data":"1f404c840d743b8095c42f74a33bb0c8e3e54023faeaeeaa551506f4e482b449"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.648990 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.667910 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" event={"ID":"e69fb667-9cde-4376-b12f-2847b0142176","Type":"ContainerStarted","Data":"1e1fae58bab316e0aa795de888a08b99888c064bcb1203b1e56fbbb0006378aa"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.668360 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.668522 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.668559 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.672054 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" event={"ID":"3377dc50-b5b0-40d0-9b16-295713320fcd","Type":"ContainerStarted","Data":"58514a31d103473ef0cba4948da9158e1500f1985ca33717084728286d08bd03"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.673756 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.673853 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.673892 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.676198 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" event={"ID":"7ad0b754-e721-4b19-b0b6-a7e1200a48d4","Type":"ContainerStarted","Data":"6adc10138e626fa93b23593864b0b617a1c522e0a06c66a84164e666411a1a71"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.676560 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.741734 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.752567 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e77ccc9cbcbfaab86a6637bd586332de42103ca24428491dc90e030aecb96198"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.762535 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" event={"ID":"8359ad74-2a40-4f5f-afe6-880a3f0a990e","Type":"ContainerStarted","Data":"cb2657da82e74029ffd25d27c22aca9101146250a3d96ce633d3615ded951672"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.763467 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.771939 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" event={"ID":"a9f05c03-72c2-4906-b327-df50d5922d28","Type":"ContainerStarted","Data":"c1be5d793b17c0d5f1e289b357efd3b19f1966a736800c5d7c282e625ea80b95"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.772107 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.775246 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"454a3789f454a93321e2864177551f6216887ecc01c222655d23de9e649e8edd"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.775330 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.782820 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" event={"ID":"72fb2e87-da8d-4db1-b255-d38d7c15b5cd","Type":"ContainerStarted","Data":"196528d9b7e215e900ccfed4e52945dd546ba9899c851902dfbfd4c5c0b86f66"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.782930 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.795590 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" event={"ID":"167d35d1-8eb3-492e-beb3-4325d183c7b9","Type":"ContainerStarted","Data":"ed1baf36ef6254d467820b9bc77bf370b65a6a7543107c3c2bbada5f6ea5814d"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.795827 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.818499 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code 
= NotFound desc = container is not created or running: checking if PID of 396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18 is running failed: container process not found" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.819437 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18 is running failed: container process not found" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.821364 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18 is running failed: container process not found" containerID="396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18" cmd=["sh","-c","if [ -x \"$(command -v curl)\" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x \"$(command -v wget)\" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.821438 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 396c8d68db03ec215f4b621c6b53836f158aee0a15193bfcc2fa3d8e5b2eab18 is running failed: container process not found" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="efa605e2-690e-48af-9218-a8826ead1e88" containerName="prometheus" Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.821442 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" event={"ID":"28a286e0-4072-40b0-aa95-4a12299f5a72","Type":"ContainerStarted","Data":"4f2665140c1ceccbab284af454ba65f8720d3b6cca87ddda45aceffceda77267"} Jan 23 12:13:50 crc kubenswrapper[4689]: I0123 12:13:50.821643 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.913429 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb is running failed: container process not found" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.913868 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb is running failed: container process not found" 
containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.914270 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb is running failed: container process not found" containerID="ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:50 crc kubenswrapper[4689]: E0123 12:13:50.914337 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ec85cf4a4c6c072168652e96af4d5dd17bdab79031b331cf85e4a5a1f23b47fb is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.458618 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.459047 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.572820 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-2r5kt" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.652879 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.653305 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.835053 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8cc0bc0c-47d7-48d8-bfba-a9694ab485a0","Type":"ContainerStarted","Data":"e95d02ebe967468fdf3159da8056900600efed1bb7cf840aa7cde3026fbb416b"} Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.842958 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" event={"ID":"af5f2d1f-74a0-4ac2-9e78-c81c3815f722","Type":"ContainerStarted","Data":"6db5d28d4ec6abcdf9640b12f0a30be5fd9952f20bd6d8383794e91930154f8c"} Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.843797 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.843873 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.844045 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.844073 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.844241 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:51 crc kubenswrapper[4689]: I0123 12:13:51.869406 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-775d8c8b9c-rkqj2" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.512159 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" containerID="cri-o://466f16cbff91ecef476fbf44e262293aa570836a2c90bc368fc4dc6e34942c0b" gracePeriod=15 Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.615839 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.618295 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.620084 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.620186 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.747859 4689 log.go:32] "ExecSync cmd from 
runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892 is running failed: container process not found" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.748477 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892 is running failed: container process not found" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.749069 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892 is running failed: container process not found" containerID="6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 12:13:52 crc kubenswrapper[4689]: E0123 12:13:52.749314 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ae4ace6ef7008744d985a700c7919ec3a208f7107bcf29cea5fafb5a180f892 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.855670 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6w2m" event={"ID":"5f0a7d6b-1743-49ab-9f0b-2742ce992ecf","Type":"ContainerStarted","Data":"a9b4c4c8688a537af04293334e07b8bcb385611c790730732312aa954f94f782"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.858183 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b2s75" event={"ID":"b1a77706-f3e5-48b9-95b8-5f13daa0d29f","Type":"ContainerStarted","Data":"d982260f86b5fc5611bcda49cb700819c5c9f6c58f16d3916b200bd995b42476"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.862541 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"efa605e2-690e-48af-9218-a8826ead1e88","Type":"ContainerStarted","Data":"61d37a5af563e348ffb1241ea17756ec5e2c3a46140f1e830cca44c975270729"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.865261 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9gvwz" event={"ID":"9d058744-ff52-4a7c-8e44-86c81270e7d1","Type":"ContainerStarted","Data":"9537d728b96a0a91bebc37c35bf87249edd44cdedcfbe0b8093f78383c39a456"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.868936 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" event={"ID":"54403d19-67da-4783-8b45-b7070bc15424","Type":"ContainerStarted","Data":"7f1d5200b85ea36bd6f42965aaecc307ba8c98c69327cc52d13efb5f87a90d83"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.871276 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" event={"ID":"39db2be1-cb37-4ca9-af8a-5ce0f2d1db16","Type":"ContainerStarted","Data":"798c3aee2e2e1f96cff042ed26ba133f976d34187cf0f7ddd928462db3d1f321"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.873869 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" event={"ID":"d55b5d87-6f4b-4eb7-bfc7-025b936cebb9","Type":"ContainerStarted","Data":"7ecf3a67c03a8eccc347b8c0e76044b5fa3b6107a688935bc834eebd903b5b40"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.873951 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.875742 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" event={"ID":"b5e62e31-60a7-4964-b3e7-611e7a8bfa81","Type":"ContainerStarted","Data":"d6d6e8ddd87d0e7216ef1c125eaf8df133b53f792c7fd276eabf9bf656a6b144"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.879554 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x87gr" event={"ID":"5fbbf7f9-c268-4a7b-a278-4f72a9099acf","Type":"ContainerStarted","Data":"2b48d4c2ea322d8ab27b4c8d28af17b93a6da5c6fe0e7e0ae024e3a517bff883"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.882485 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4zskl" event={"ID":"0cb158e3-50d7-4750-8f95-c22d0a94a70f","Type":"ContainerStarted","Data":"996db49acda1f5d97d38588c9e60f753e034aced12427349eeefb60a9e869838"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.885494 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" event={"ID":"c9dc7063-1b29-40e1-b451-e9dc882e7476","Type":"ContainerStarted","Data":"80b7a89026e3d81a3bf7662554286ffbedacd5f44d80b7dd6d68cd8482d7f95d"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.889863 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d9bfx" event={"ID":"d92e2c5f-df9d-44e5-839c-806799a650a4","Type":"ContainerStarted","Data":"3eae4eadfe04bc134db897f21758b58a31a5121f9944737eda6b4a686512b8fa"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.897627 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerStarted","Data":"c373abcc771c979cf5a24cc5f09bb29da6f8f40ac65b6320f1cbfd5fc2a215bc"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.903454 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" event={"ID":"45cadce8-d2da-450b-9b37-c2a6b2a1c595","Type":"ContainerStarted","Data":"52815cf24498a0407497c734cd72be86136bc28711b05bef0f15ba602b0c6c55"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.906498 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" event={"ID":"d587cb55-dfd2-42e6-bb32-3a4202dd05c5","Type":"ContainerStarted","Data":"95e69f578fd07b8bee0dd861092522e3550d61995cdcb8efcbf1f0e3951718a0"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.908181 4689 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" event={"ID":"5f4d15d8-f941-4082-ab51-3ecda5527f9b","Type":"ContainerStarted","Data":"284fc6c3d8944f50866c57c948b6470de8a7d82560c1a1f376d6c0497affbb80"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.910057 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" event={"ID":"2bd7a193-5394-452e-9315-0332e4a4e667","Type":"ContainerStarted","Data":"3280351fe93ff7334157085fde3813183bac501c31fa1ce713d88f81fee13984"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.912984 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-qvgvs_c7b42454-f36b-4ab0-86d0-a2decba67e28/router/0.log" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.913065 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qvgvs" event={"ID":"c7b42454-f36b-4ab0-86d0-a2decba67e28","Type":"ContainerStarted","Data":"5f15a1dc413b970f6ecaed321f53a7c824225da3cdd2cb59019be7d282b75e51"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.919212 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" event={"ID":"d36ac685-507d-4cfa-b6fe-7f595536c32f","Type":"ContainerStarted","Data":"eb5a359eb1d152777a423ccbd34a0dff84797ef1205b755a7940195fa0fc2784"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.921925 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" event={"ID":"53597531-35c9-4478-95cc-690c554f04d0","Type":"ContainerStarted","Data":"b564aa11fa9edec6f1416b84fecdfc385538e8cc5f9b28ccfa20a3f91321b5a5"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.922132 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.924702 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4hdbj" event={"ID":"b7c32de2-03fb-4b12-8fdf-69161c24eed2","Type":"ContainerStarted","Data":"c34bacfd01559d78d3864694665cee19a8d43802c7ce5b0134fad9153ed767c9"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.930080 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5859dc97b8-47f8f_5d2085e7-92df-4502-97e9-66dfbfae189a/console/0.log" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.930126 4689 generic.go:334] "Generic (PLEG): container finished" podID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerID="466f16cbff91ecef476fbf44e262293aa570836a2c90bc368fc4dc6e34942c0b" exitCode=2 Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.930186 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5859dc97b8-47f8f" event={"ID":"5d2085e7-92df-4502-97e9-66dfbfae189a","Type":"ContainerDied","Data":"466f16cbff91ecef476fbf44e262293aa570836a2c90bc368fc4dc6e34942c0b"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.932402 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" event={"ID":"4922b965-fa40-47b5-b388-e63767b62a97","Type":"ContainerStarted","Data":"4d7bbf01aa78b65a95528ce313c09e1385960750899a86cb2f6f04a5de02938f"} Jan 23 12:13:52 crc kubenswrapper[4689]: 
I0123 12:13:52.932622 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.934540 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" event={"ID":"3369528a-f39f-4e47-92e9-abbca4395b98","Type":"ContainerStarted","Data":"574c9c4c765b06a28e5d2e7190d758accf5503541222da2920875869a5edf996"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.934785 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.937705 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" event={"ID":"3d3561eb-7369-4466-b9ee-037e02b2c219","Type":"ContainerStarted","Data":"e5398fadceca7807c4797c442606157f767def61f67a65f27fed34eee715884d"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.940138 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" event={"ID":"6e48e594-66b4-4d88-823f-2ed90fa79d66","Type":"ContainerStarted","Data":"3785bb0290eb034f43bd8a81e053c2cebc1f1eda8bc9ce910c8e0d71cb43763f"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.942701 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" event={"ID":"9c5d05df-7a1c-4c0c-b791-cc8e932d2560","Type":"ContainerStarted","Data":"a911b1dbca0180727f6510d989b58cc3c92b07df6963f0cf5b792313a34bc96a"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.943687 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.944213 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.944245 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.946192 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" event={"ID":"1f6a7f15-609b-414e-8119-366afe98811f","Type":"ContainerStarted","Data":"bd493b847b34b6edeb0520e3ea87086fb55d48884838cac0da7a1d29a717a795"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.949642 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" event={"ID":"5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3","Type":"ContainerStarted","Data":"8227fedf943b262fb0a3efcf998c6ad868b1462a175532f152a2891da4bc81c5"} Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.950989 4689 patch_prober.go:28] interesting 
pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:52 crc kubenswrapper[4689]: I0123 12:13:52.951040 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:53 crc kubenswrapper[4689]: I0123 12:13:53.062219 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-flfwq" Jan 23 12:13:53 crc kubenswrapper[4689]: I0123 12:13:53.422392 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:53 crc kubenswrapper[4689]: I0123 12:13:53.943486 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:53 crc kubenswrapper[4689]: I0123 12:13:53.950220 4689 patch_prober.go:28] interesting pod/route-controller-manager-7cc8986677-69l76 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:13:53 crc kubenswrapper[4689]: I0123 12:13:53.950281 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" podUID="e69fb667-9cde-4376-b12f-2847b0142176" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.027735 4689 generic.go:334] "Generic (PLEG): container finished" podID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerID="39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8" exitCode=0 Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030059 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030124 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.027793 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerDied","Data":"39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8"} Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030738 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030784 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030941 4689 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-95bv6 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.5:8081/healthz\": dial tcp 10.217.0.5:8081: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.030963 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" podUID="45cadce8-d2da-450b-9b37-c2a6b2a1c595" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.5:8081/healthz\": dial tcp 10.217.0.5:8081: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.034898 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 12:13:54 crc kubenswrapper[4689]: E0123 12:13:54.202012 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8 is running failed: container process not found" containerID="39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:54 crc kubenswrapper[4689]: E0123 12:13:54.202634 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8 is running failed: container process not found" containerID="39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:54 crc kubenswrapper[4689]: E0123 12:13:54.202917 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8 is running failed: container process not found" containerID="39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 23 12:13:54 crc kubenswrapper[4689]: E0123 12:13:54.203005 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
39dcfdd7a897b3fd701150f03a67113c8965f0abd5094686f968b74db62fb4b8 is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.244865 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.246702 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.246763 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.323799 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]log ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:13:54 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:13:54 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:13:54 crc kubenswrapper[4689]: readyz check failed Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.323857 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.431773 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.458391 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.471779 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.471834 4689 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.472087 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.472214 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.474859 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zhh2t" Jan 23 12:13:54 crc kubenswrapper[4689]: I0123 12:13:54.768882 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8545zczg" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.041527 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cab355b0-25b6-4ad4-83ad-718ae756ae29","Type":"ContainerStarted","Data":"7f370f979b4058b3e0f499f1ca158c0b5aadfdfa6ce73951587c01a0a0ab69b1"} Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044107 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5859dc97b8-47f8f_5d2085e7-92df-4502-97e9-66dfbfae189a/console/0.log" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044194 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5859dc97b8-47f8f" event={"ID":"5d2085e7-92df-4502-97e9-66dfbfae189a","Type":"ContainerStarted","Data":"483abcfd84bc396a8f3a3179a8e4fe9fc210082037921bfd22d94d4147e7e670"} Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044700 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044746 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044816 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.044876 4689 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.045436 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.168020 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.168070 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.168087 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.168128 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.234731 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.246541 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.246767 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.246805 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.496386 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xvxpp" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.546780 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 
12:13:55.546888 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-d9bfx" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.693296 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.798682 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.799181 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Jan 23 12:13:55 crc kubenswrapper[4689]: I0123 12:13:55.799241 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.246249 4689 patch_prober.go:28] interesting pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.246322 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.434039 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.434386 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.435232 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"d3dd4e2cf5fd0390b09097eb990bae71c1a878f4111c85e60e6bc164711fdefa"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted" Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.435284 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" containerID="cri-o://d3dd4e2cf5fd0390b09097eb990bae71c1a878f4111c85e60e6bc164711fdefa" gracePeriod=30 Jan 23 12:13:56 crc kubenswrapper[4689]: I0123 12:13:56.813534 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7cc8986677-69l76" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.253645 4689 patch_prober.go:28] interesting 
pod/router-default-5444994796-qvgvs container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 23 12:13:57 crc kubenswrapper[4689]: [+]has-synced ok Jan 23 12:13:57 crc kubenswrapper[4689]: [+]process-running ok Jan 23 12:13:57 crc kubenswrapper[4689]: healthz check failed Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.253698 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qvgvs" podUID="c7b42454-f36b-4ab0-86d0-a2decba67e28" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458536 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458586 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458631 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458693 4689 patch_prober.go:28] interesting pod/console-5859dc97b8-47f8f container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.136:8443/health\": dial tcp 10.217.0.136:8443: connect: connection refused" start-of-body= Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458762 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458800 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-5859dc97b8-47f8f" podUID="5d2085e7-92df-4502-97e9-66dfbfae189a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.136:8443/health\": dial tcp 10.217.0.136:8443: connect: connection refused" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458743 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.458829 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 23 12:13:57 crc kubenswrapper[4689]: I0123 12:13:57.793723 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack-operators/openstack-operator-index-d9bfx" 
podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:57 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:57 crc kubenswrapper[4689]: > Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.006725 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.011371 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-q2clt" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.055783 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.057595 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-s69pd" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.161947 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.164910 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.246973 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.248715 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-86c6v" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.249597 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.255803 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-qvgvs" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.306163 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.309001 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-b8s9h" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.321356 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.339659 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.345493 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-dl6g9" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.402407 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.407060 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-cgxb7" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.445716 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-szbq7" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.515100 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.515713 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.528140 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.531423 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-m8m6r" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.599562 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.618616 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.618765 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.637033 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-2lwwn" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.692126 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.708272 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-95bv6" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.774620 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-5rkch" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.853249 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.890714 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-kklnd" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.937748 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-t97lk" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.944967 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:58 crc kubenswrapper[4689]: I0123 12:13:58.962331 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.032696 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-m2sb7" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.173303 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.177564 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-7dc68b46f7-8szn8" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.330173 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.331863 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]log ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:13:59 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:13:59 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:13:59 crc kubenswrapper[4689]: readyz check failed Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.331926 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.336891 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-94c58dc69-75pd4" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.412332 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-rszc5" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.435428 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-j79ts" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.582205 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 
12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.641299 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:13:59 crc kubenswrapper[4689]: E0123 12:13:59.641857 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:13:59 crc kubenswrapper[4689]: I0123 12:13:59.711852 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:13:59 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:13:59 crc kubenswrapper[4689]: > Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.002556 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-64d6f55f49-snsq8" Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.080194 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.201649 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.315458 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.315794 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.470741 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" Jan 23 12:14:00 crc kubenswrapper[4689]: E0123 12:14:00.534507 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:00 crc kubenswrapper[4689]: E0123 12:14:00.541289 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:00 crc kubenswrapper[4689]: E0123 12:14:00.548399 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:00 crc kubenswrapper[4689]: E0123 12:14:00.548510 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown 
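The "back-off 5m0s restarting failed container" error above is kubelet's CrashLoopBackOff: the restart delay starts at 10s, doubles on each crash, and is capped at 5 minutes (and resets after the container runs cleanly for a while), which is why machine-config-daemon has settled at the 5m0s cap. A tiny illustrative sketch of that schedule:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second
	const maxDelay = 5 * time.Minute // the "back-off 5m0s" cap in the log
	for i := 1; i <= 7; i++ {
		fmt.Printf("restart %d: back-off %s\n", i, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s, ...
}
```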
Jan 23 12:14:00 crc kubenswrapper[4689]: E0123 12:14:00.548510 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine"
Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.724106 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pkdqh"
Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.741644 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-vcs9z"
Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.936891 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 12:14:00 crc kubenswrapper[4689]: I0123 12:14:00.937099 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x87gr"
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.092969 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c8c5b48f6-4bw4b"
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.149464 4689 generic.go:334] "Generic (PLEG): container finished" podID="9926a3b2-8d65-4876-b56b-488948df1352" containerID="c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c" exitCode=0
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.149613 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerDied","Data":"c81b4b22041a2d90d82330af799da53bb5324bdb51549d654e252ed319df475c"}
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.149639 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"9926a3b2-8d65-4876-b56b-488948df1352","Type":"ContainerStarted","Data":"8d6f2985ecde669aeb4bb30b5b022b44efd7a76731fb067ac12ba50c5a96b885"}
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.393899 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-mtbb4"
Jan 23 12:14:01 crc kubenswrapper[4689]: I0123 12:14:01.423789 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=<
Jan 23 12:14:01 crc kubenswrapper[4689]: 	timeout: failed to connect service ":50051" within 1s
Jan 23 12:14:01 crc kubenswrapper[4689]: >
Jan 23 12:14:02 crc kubenswrapper[4689]: I0123 12:14:02.031974 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=<
Jan 23 12:14:02 crc kubenswrapper[4689]: 	timeout: failed to connect service ":50051" within 1s
Jan 23 12:14:02 crc kubenswrapper[4689]: >
Jan 23 12:14:02 crc kubenswrapper[4689]: I0123 12:14:02.282941 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tqgjs"
Jan 23 12:14:02 crc kubenswrapper[4689]: I0123 12:14:02.612512 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
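heat-engine's readiness probe above is an exec probe: kubelet asks the CRI (cri-o here) to run ["/usr/bin/pgrep","-r","DRST","heat-engine"] inside the container, and exit code 0 means healthy. The "cannot register an exec PID: container is stopping" error is cri-o refusing to start an exec in a container that is already being torn down. A minimal sketch of the exec-probe contract, run against a host command purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// probeExec runs the probe command with a deadline; a non-zero
// exit code or a timeout is reported as probe failure.
func probeExec(ctx context.Context, name string, args ...string) error {
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	if err := exec.CommandContext(ctx, name, args...).Run(); err != nil {
		return fmt.Errorf("exec probe failed: %w", err)
	}
	return nil
}

func main() {
	// The same command the log shows cri-o being asked to exec.
	fmt.Println(probeExec(context.Background(), "/usr/bin/pgrep", "-r", "DRST", "heat-engine"))
}
```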
probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 23 12:14:02 crc kubenswrapper[4689]: I0123 12:14:02.748349 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:14:02 crc kubenswrapper[4689]: I0123 12:14:02.748406 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:14:03 crc kubenswrapper[4689]: I0123 12:14:03.181607 4689 generic.go:334] "Generic (PLEG): container finished" podID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerID="d3dd4e2cf5fd0390b09097eb990bae71c1a878f4111c85e60e6bc164711fdefa" exitCode=0 Jan 23 12:14:03 crc kubenswrapper[4689]: I0123 12:14:03.187718 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"543b7065-0dd2-411e-9854-8aaa3e11dd3e","Type":"ContainerDied","Data":"d3dd4e2cf5fd0390b09097eb990bae71c1a878f4111c85e60e6bc164711fdefa"} Jan 23 12:14:03 crc kubenswrapper[4689]: I0123 12:14:03.799524 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:03 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:03 crc kubenswrapper[4689]: > Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.201983 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.202030 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.324766 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]log ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:14:04 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:14:04 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:14:04 crc kubenswrapper[4689]: readyz check failed Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.324842 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.471842 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js 
container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.471890 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.471894 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:04 crc kubenswrapper[4689]: I0123 12:14:04.471951 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:05 crc kubenswrapper[4689]: I0123 12:14:05.001226 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" Jan 23 12:14:05 crc kubenswrapper[4689]: I0123 12:14:05.173975 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" Jan 23 12:14:05 crc kubenswrapper[4689]: I0123 12:14:05.802131 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" Jan 23 12:14:06 crc kubenswrapper[4689]: I0123 12:14:06.599803 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:06 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:06 crc kubenswrapper[4689]: > Jan 23 12:14:07 crc kubenswrapper[4689]: I0123 12:14:07.066454 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.181171209s: [/var/lib/containers/storage/overlay/b7cc953b5c1f1c9673950ca04f16cc7d1b8529fb0b4f31a55acae0c0204a1f72/diff /var/log/pods/openstack_neutron-6cf4c786cc-4bmzv_3b0b0aa7-a504-49b8-b6b4-5548b6ee7690/neutron-httpd/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:14:07 crc kubenswrapper[4689]: I0123 12:14:07.463675 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:14:07 crc kubenswrapper[4689]: I0123 12:14:07.468116 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5859dc97b8-47f8f" Jan 23 12:14:08 crc kubenswrapper[4689]: I0123 12:14:08.104280 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" podUID="64f769e0-be75-4b1f-8cbb-587842d51589" containerName="registry" containerID="cri-o://124fdd031e1adbb915bd981f1d70c8b6fbe7b04b210766c7de8d99eaa4f11897" gracePeriod=29 Jan 23 12:14:08 crc kubenswrapper[4689]: I0123 
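The "Killing container with a grace period" entries (gracePeriod=29 above, =10 and =2 below) follow the standard TERM-then-KILL pattern: the runtime sends SIGTERM, waits up to the grace period, then SIGKILLs whatever is left. A sketch of that pattern against an ordinary child process, not the CRI path kubelet actually uses:

```go
package main

import (
	"os"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, waits up to the grace period,
// then falls back to SIGKILL.
func killWithGrace(p *os.Process, grace time.Duration) {
	p.Signal(syscall.SIGTERM)
	done := make(chan struct{})
	go func() {
		p.Wait() // reap the child once it exits
		close(done)
	}()
	select {
	case <-done:
		// exited within the grace period (exitCode=0 in the log)
	case <-time.After(grace):
		p.Kill() // SIGKILL; the child then exits 137 (128+9)
	}
}

func main() {
	// Example: a child that ignores nothing and would normally
	// exit on SIGTERM well inside the grace period.
	p, err := os.StartProcess("/bin/sleep", []string{"sleep", "60"}, &os.ProcAttr{})
	if err != nil {
		return
	}
	killWithGrace(p, 2*time.Second)
}
```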
Jan 23 12:14:08 crc kubenswrapper[4689]: I0123 12:14:08.104280 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" containerID="cri-o://ffd3ae9e6aa8933d9d68a18735ff9e6b4b4b12a171b4e366fa7a22da630b6545" gracePeriod=10
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.277556 4689 generic.go:334] "Generic (PLEG): container finished" podID="8c315092-e9fe-409e-81ca-39ac98b9fea6" containerID="ec83d2b63893c9b67e4f56af15a710ca5f53141c2ccfe908c8ba8da2348c3a8b" exitCode=1
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.277856 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"8c315092-e9fe-409e-81ca-39ac98b9fea6","Type":"ContainerDied","Data":"ec83d2b63893c9b67e4f56af15a710ca5f53141c2ccfe908c8ba8da2348c3a8b"}
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.284556 4689 generic.go:334] "Generic (PLEG): container finished" podID="5e81a398-514c-4bfa-9038-7ede14a02743" containerID="ffd3ae9e6aa8933d9d68a18735ff9e6b4b4b12a171b4e366fa7a22da630b6545" exitCode=0
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.284598 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" event={"ID":"5e81a398-514c-4bfa-9038-7ede14a02743","Type":"ContainerDied","Data":"ffd3ae9e6aa8933d9d68a18735ff9e6b4b4b12a171b4e366fa7a22da630b6545"}
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.327544 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]log ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]etcd ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]etcd-readiness ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]informer-sync ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok
Jan 23 12:14:09 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld
Jan 23 12:14:09 crc kubenswrapper[4689]: readyz check failed
Jan 23 12:14:09 crc kubenswrapper[4689]: I0123 12:14:09.327614 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 12:14:10 crc kubenswrapper[4689]: I0123 12:14:10.135584 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=<
Jan 23 12:14:10 crc kubenswrapper[4689]: 	timeout: failed to connect service ":50051" within 1s
Jan 23 12:14:10 crc kubenswrapper[4689]: >
Jan 23 12:14:10 crc kubenswrapper[4689]: I0123 12:14:10.320789 4689 generic.go:334] "Generic (PLEG): container finished" podID="64f769e0-be75-4b1f-8cbb-587842d51589" containerID="124fdd031e1adbb915bd981f1d70c8b6fbe7b04b210766c7de8d99eaa4f11897" exitCode=0
Jan 23 12:14:10 crc kubenswrapper[4689]: I0123 12:14:10.321365 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" event={"ID":"64f769e0-be75-4b1f-8cbb-587842d51589","Type":"ContainerDied","Data":"124fdd031e1adbb915bd981f1d70c8b6fbe7b04b210766c7de8d99eaa4f11897"}
Jan 23 12:14:10 crc kubenswrapper[4689]: E0123 12:14:10.543467 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 12:14:10 crc kubenswrapper[4689]: E0123 12:14:10.558510 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 12:14:10 crc kubenswrapper[4689]: E0123 12:14:10.588647 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 23 12:14:10 crc kubenswrapper[4689]: E0123 12:14:10.588733 4689 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine"
Jan 23 12:14:10 crc kubenswrapper[4689]: I0123 12:14:10.642210 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51"
Jan 23 12:14:10 crc kubenswrapper[4689]: E0123 12:14:10.642447 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653"
Jan 23 12:14:11 crc kubenswrapper[4689]: I0123 12:14:11.360772 4689 generic.go:334] "Generic (PLEG): container finished" podID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerID="3eadc0f88cda2375616ec2db9c2d200b9461f716dcd8caaf83f49b0f88e38114" exitCode=137
Jan 23 12:14:11 crc kubenswrapper[4689]: I0123 12:14:11.361000 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerDied","Data":"3eadc0f88cda2375616ec2db9c2d200b9461f716dcd8caaf83f49b0f88e38114"}
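The exit codes in the PLEG events above follow the shell convention that 128+N means "killed by signal N": csi-hostpathplugin's exitCode=137 is 128+9, i.e. SIGKILL (typically after the termination grace period expired), while 0 and 1 are clean and error exits. A small worked example, nothing more:

```go
package main

import "fmt"

func describe(code int) string {
	if code > 128 {
		return fmt.Sprintf("killed by signal %d", code-128)
	}
	if code == 0 {
		return "exited cleanly"
	}
	return fmt.Sprintf("exited with error %d", code)
}

func main() {
	for _, c := range []int{0, 1, 137} {
		fmt.Printf("exitCode=%d: %s\n", c, describe(c))
	}
	// exitCode=137: killed by signal 9 (SIGKILL)
}
```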
podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:11 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:11 crc kubenswrapper[4689]: > Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:11.985805 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:12 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:12 crc kubenswrapper[4689]: > Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.210802 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285109 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285400 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285568 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285609 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285629 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285670 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfqbp\" (UniqueName: \"kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285763 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285798 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.285850 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir\") pod \"8c315092-e9fe-409e-81ca-39ac98b9fea6\" (UID: \"8c315092-e9fe-409e-81ca-39ac98b9fea6\") " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.286674 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.287539 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data" (OuterVolumeSpecName: "config-data") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.291581 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.305810 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp" (OuterVolumeSpecName: "kube-api-access-vfqbp") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "kube-api-access-vfqbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.305981 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.365090 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "ca-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.365379 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.371119 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.375080 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "8c315092-e9fe-409e-81ca-39ac98b9fea6" (UID: "8c315092-e9fe-409e-81ca-39ac98b9fea6"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.380960 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"8c315092-e9fe-409e-81ca-39ac98b9fea6","Type":"ContainerDied","Data":"db62063870d5ffdb61507896753cb535482a06261abeb5c383bb78aaf8771dd2"} Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.381014 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db62063870d5ffdb61507896753cb535482a06261abeb5c383bb78aaf8771dd2" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.381058 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388779 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388814 4689 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388826 4689 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/8c315092-e9fe-409e-81ca-39ac98b9fea6-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388835 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388845 4689 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388853 4689 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8c315092-e9fe-409e-81ca-39ac98b9fea6-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388861 4689 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/8c315092-e9fe-409e-81ca-39ac98b9fea6-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388914 4689 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.388924 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfqbp\" (UniqueName: \"kubernetes.io/projected/8c315092-e9fe-409e-81ca-39ac98b9fea6-kube-api-access-vfqbp\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.429968 4689 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 23 12:14:12 crc kubenswrapper[4689]: I0123 12:14:12.490979 4689 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:13 crc kubenswrapper[4689]: I0123 12:14:13.801822 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:13 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:13 crc kubenswrapper[4689]: > Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.327958 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 
container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]log ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:14:14 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:14:14 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:14:14 crc kubenswrapper[4689]: readyz check failed Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.328316 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.414291 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" event={"ID":"64f769e0-be75-4b1f-8cbb-587842d51589","Type":"ContainerStarted","Data":"1739e9e87ec4ebf3eae590b3a2c8d73bf1cbee769ed5bf1164999facdff4dd52"} Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.473439 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.473516 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.473564 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.473636 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.473748 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.474795 4689 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.475614 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.476652 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"cdae71403f1cef069a3db6d73580a5dac2e51b2609813ab908a515617eeccc18"} pod="openshift-console/downloads-7954f5f757-hc5js" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 23 12:14:14 crc kubenswrapper[4689]: I0123 12:14:14.476748 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" containerID="cri-o://cdae71403f1cef069a3db6d73580a5dac2e51b2609813ab908a515617eeccc18" gracePeriod=2 Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.441233 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" event={"ID":"5e81a398-514c-4bfa-9038-7ede14a02743","Type":"ContainerStarted","Data":"834d96b0b64ab9ea695a2c98a1d67ff12690296a6f47125076d2ccb94dbc58f6"} Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.443543 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.443630 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": dial tcp 10.217.0.56:6443: connect: connection refused" start-of-body= Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.443656 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": dial tcp 10.217.0.56:6443: connect: connection refused" Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.449727 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"543b7065-0dd2-411e-9854-8aaa3e11dd3e","Type":"ContainerStarted","Data":"330b4f7c8cd4221b7fd90085fc08889d04e16dbb51e1feee93d9f7a47a39e51d"} Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.449773 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 12:14:15 crc kubenswrapper[4689]: W0123 12:14:15.462392 4689 logging.go:55] [core] [Channel #5318 SubChannel #5319]grpc: addrConn.createTransport failed to connect to {Addr: "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", ServerName: "localhost", }. 
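The grpc warnings that follow come from kubelet trying to reach the CSI hostpath driver over its gRPC endpoint on a unix socket; while the csi-hostpathplugin container is down (it just exited 137 above), every dial of /var/lib/kubelet/plugins/csi-hostpath/csi.sock is refused and the channel retries. A minimal stdlib-only sketch of that dial, not kubelet's actual gRPC client:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Fails with "connect: connection refused" until the CSI plugin
	// is listening on the socket again.
	conn, err := net.DialTimeout("unix", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	conn.Close()
}
```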
Jan 23 12:14:15 crc kubenswrapper[4689]: W0123 12:14:15.462392 4689 logging.go:55] [core] [Channel #5318 SubChannel #5319]grpc: addrConn.createTransport failed to connect to {Addr: "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", ServerName: "localhost", }. Err: connection error: desc = "transport: Error while dialing: dial unix /var/lib/kubelet/plugins/csi-hostpath/csi.sock: connect: connection refused"
Jan 23 12:14:15 crc kubenswrapper[4689]: W0123 12:14:15.576121 4689 logging.go:55] [core] [Channel #5320 SubChannel #5321]grpc: addrConn.createTransport failed to connect to {Addr: "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", ServerName: "localhost", }. Err: connection error: desc = "transport: Error while dialing: dial unix /var/lib/kubelet/plugins/csi-hostpath/csi.sock: connect: connection refused"
Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.770996 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-d9bfx"
Jan 23 12:14:15 crc kubenswrapper[4689]: I0123 12:14:15.825460 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-d9bfx"
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.465497 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"2670ab62a04e1981e6411863ca94b7bc5d0bb93cfc12a6dffd97959f597067c4"}
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.468480 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-hc5js_fa9894a6-c179-4b45-a036-b94c23125162/download-server/0.log"
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.468535 4689 generic.go:334] "Generic (PLEG): container finished" podID="fa9894a6-c179-4b45-a036-b94c23125162" containerID="cdae71403f1cef069a3db6d73580a5dac2e51b2609813ab908a515617eeccc18" exitCode=0
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.468644 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerDied","Data":"cdae71403f1cef069a3db6d73580a5dac2e51b2609813ab908a515617eeccc18"}
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.468685 4689 scope.go:117] "RemoveContainer" containerID="d778f1fb3374d50358dab02ff6858745a22ad7313dc78c7dd36f554e7a066555"
Jan 23 12:14:16 crc kubenswrapper[4689]: I0123 12:14:16.625072 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5"
Jan 23 12:14:17 crc kubenswrapper[4689]: I0123 12:14:17.480461 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-hc5js" event={"ID":"fa9894a6-c179-4b45-a036-b94c23125162","Type":"ContainerStarted","Data":"97bb42b894e5d3f658a4fec7dfb3ffeddedfbdd4c7610c759a2b3ef913f14233"}
Jan 23 12:14:17 crc kubenswrapper[4689]: I0123 12:14:17.481449 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 12:14:17 crc kubenswrapper[4689]: I0123 12:14:17.481531 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 12:14:18 crc kubenswrapper[4689]: I0123 12:14:18.406142 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 23 12:14:18 crc kubenswrapper[4689]: I0123 12:14:18.490491 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-hc5js"
Jan 23 12:14:18 crc kubenswrapper[4689]: I0123 12:14:18.490589 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 12:14:18 crc kubenswrapper[4689]: I0123 12:14:18.490630 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.324318 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]log ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]etcd ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]etcd-readiness ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]informer-sync ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok
Jan 23 12:14:19 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld
Jan 23 12:14:19 crc kubenswrapper[4689]: readyz check failed
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.324680 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.327033 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"]
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.504452 4689 generic.go:334] "Generic (PLEG): container finished" podID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" exitCode=0
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.505998 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.506058 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 12:14:19 crc kubenswrapper[4689]: E0123 12:14:19.539267 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="extract-content"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.539317 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="extract-content"
Jan 23 12:14:19 crc kubenswrapper[4689]: E0123 12:14:19.539375 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="extract-utilities"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.539386 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="extract-utilities"
Jan 23 12:14:19 crc kubenswrapper[4689]: E0123 12:14:19.539417 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c315092-e9fe-409e-81ca-39ac98b9fea6" containerName="tempest-tests-tempest-tests-runner"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.539429 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c315092-e9fe-409e-81ca-39ac98b9fea6" containerName="tempest-tests-tempest-tests-runner"
Jan 23 12:14:19 crc kubenswrapper[4689]: E0123 12:14:19.539501 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="registry-server"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.539511 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="registry-server"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.540055 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c315092-e9fe-409e-81ca-39ac98b9fea6" containerName="tempest-tests-tempest-tests-runner"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.540093 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b59050fc-c6bd-427e-a99f-3f2d920d4e6d" containerName="registry-server"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.547259 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7db46dddd6-txhvk" event={"ID":"222ae852-00ad-449b-a92b-b0f52d2b856f","Type":"ContainerDied","Data":"412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84"}
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.547410 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.609555 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.610085 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgs9k\" (UniqueName: \"kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.610130 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.712879 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgs9k\" (UniqueName: \"kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.712980 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.713039 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.713973 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.714100 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d"
Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.870386 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=<
Jan 23
12:14:19 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:19 crc kubenswrapper[4689]: > Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.968272 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgs9k\" (UniqueName: \"kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k\") pod \"redhat-marketplace-xls2d\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:19 crc kubenswrapper[4689]: I0123 12:14:19.977850 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"] Jan 23 12:14:20 crc kubenswrapper[4689]: I0123 12:14:20.172173 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:20 crc kubenswrapper[4689]: E0123 12:14:20.523400 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84 is running failed: container process not found" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:20 crc kubenswrapper[4689]: E0123 12:14:20.524001 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84 is running failed: container process not found" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:20 crc kubenswrapper[4689]: E0123 12:14:20.524370 4689 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84 is running failed: container process not found" containerID="412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 23 12:14:20 crc kubenswrapper[4689]: E0123 12:14:20.524447 4689 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 412aa7683b16447641cc011ec8418913fef11baf1105070d8e9b768d202e3b84 is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-7db46dddd6-txhvk" podUID="222ae852-00ad-449b-a92b-b0f52d2b856f" containerName="heat-engine" Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.194550 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"] Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.378272 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:21 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:21 crc kubenswrapper[4689]: > Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.531344 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" 
event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerStarted","Data":"009edde855c59c75fbed9e20ab40153db72a2aafc883c7a44306e0da10c8e082"} Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.533173 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7db46dddd6-txhvk" event={"ID":"222ae852-00ad-449b-a92b-b0f52d2b856f","Type":"ContainerStarted","Data":"61cc068627e1a4990d9842ec8a90810a2ca505e9d57548030df7c5d7350b3716"} Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.533309 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7db46dddd6-txhvk" Jan 23 12:14:21 crc kubenswrapper[4689]: I0123 12:14:21.972502 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:21 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:21 crc kubenswrapper[4689]: > Jan 23 12:14:22 crc kubenswrapper[4689]: I0123 12:14:22.554921 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerStarted","Data":"03cdc20dd0cd4f0af84bd508d2dbb570330c84b4eea10d3b97aa4e1f7a4cf3fa"} Jan 23 12:14:23 crc kubenswrapper[4689]: I0123 12:14:23.447523 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:23 crc kubenswrapper[4689]: I0123 12:14:23.569106 4689 generic.go:334] "Generic (PLEG): container finished" podID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerID="03cdc20dd0cd4f0af84bd508d2dbb570330c84b4eea10d3b97aa4e1f7a4cf3fa" exitCode=0 Jan 23 12:14:23 crc kubenswrapper[4689]: I0123 12:14:23.569195 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerDied","Data":"03cdc20dd0cd4f0af84bd508d2dbb570330c84b4eea10d3b97aa4e1f7a4cf3fa"} Jan 23 12:14:23 crc kubenswrapper[4689]: I0123 12:14:23.801559 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-h6w2m" podUID="5f0a7d6b-1743-49ab-9f0b-2742ce992ecf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:23 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:23 crc kubenswrapper[4689]: > Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.331697 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]log ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:14:24 crc kubenswrapper[4689]: 
[+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:14:24 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:14:24 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:14:24 crc kubenswrapper[4689]: readyz check failed Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.332022 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.472290 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.472345 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.472728 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.472745 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:24 crc kubenswrapper[4689]: I0123 12:14:24.641617 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:14:24 crc kubenswrapper[4689]: E0123 12:14:24.641847 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:14:25 crc kubenswrapper[4689]: I0123 12:14:25.253597 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" Jan 23 12:14:28 crc kubenswrapper[4689]: I0123 12:14:28.525549 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:28 crc kubenswrapper[4689]: I0123 12:14:28.646329 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerStarted","Data":"83625ce84992466d2cabfcd3a004bfec9f88ce00920fa69bdc1ba1134b2a6f59"} Jan 23 12:14:29 crc kubenswrapper[4689]: I0123 12:14:29.339572 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]log ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:14:29 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:14:29 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:14:29 crc kubenswrapper[4689]: readyz check failed Jan 23 12:14:29 crc kubenswrapper[4689]: I0123 12:14:29.339667 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:29 crc kubenswrapper[4689]: I0123 12:14:29.578868 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:29 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:29 crc kubenswrapper[4689]: > Jan 23 12:14:29 crc kubenswrapper[4689]: I0123 12:14:29.590046 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6b5bd865cb-ppjnx" Jan 23 12:14:30 crc kubenswrapper[4689]: I0123 12:14:30.799437 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:14:30 crc kubenswrapper[4689]: I0123 12:14:30.854376 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:14:31 crc kubenswrapper[4689]: I0123 12:14:31.364311 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:31 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:31 crc kubenswrapper[4689]: > Jan 23 12:14:31 crc kubenswrapper[4689]: I0123 12:14:31.766855 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Jan 23 12:14:31 crc kubenswrapper[4689]: I0123 12:14:31.983247 4689 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:31 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:31 crc kubenswrapper[4689]: > Jan 23 12:14:32 crc kubenswrapper[4689]: I0123 12:14:32.697043 4689 generic.go:334] "Generic (PLEG): container finished" podID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerID="83625ce84992466d2cabfcd3a004bfec9f88ce00920fa69bdc1ba1134b2a6f59" exitCode=0 Jan 23 12:14:32 crc kubenswrapper[4689]: I0123 12:14:32.697133 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerDied","Data":"83625ce84992466d2cabfcd3a004bfec9f88ce00920fa69bdc1ba1134b2a6f59"} Jan 23 12:14:33 crc kubenswrapper[4689]: I0123 12:14:33.078039 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:14:33 crc kubenswrapper[4689]: I0123 12:14:33.250536 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h6w2m" Jan 23 12:14:33 crc kubenswrapper[4689]: I0123 12:14:33.537707 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:33 crc kubenswrapper[4689]: I0123 12:14:33.716637 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerStarted","Data":"94a962a1dbc6a2d7d5689506f873aca79bcf8ff7cf2891d52ce900d8e84b4a50"} Jan 23 12:14:33 crc kubenswrapper[4689]: I0123 12:14:33.750189 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xls2d" podStartSLOduration=5.058370035 podStartE2EDuration="14.750019058s" podCreationTimestamp="2026-01-23 12:14:19 +0000 UTC" firstStartedPulling="2026-01-23 12:14:23.571873227 +0000 UTC m=+5128.196553086" lastFinishedPulling="2026-01-23 12:14:33.26352225 +0000 UTC m=+5137.888202109" observedRunningTime="2026-01-23 12:14:33.742445954 +0000 UTC m=+5138.367125813" watchObservedRunningTime="2026-01-23 12:14:33.750019058 +0000 UTC m=+5138.374698917" Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.326059 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]log ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]etcd ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]etcd-readiness ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]informer-sync ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/generic-apiserver-start-informers ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/max-in-flight-filter ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartUserInformer ok Jan 23 12:14:34 crc 
kubenswrapper[4689]: [+]poststarthook/openshift.io-StartOAuthInformer ok Jan 23 12:14:34 crc kubenswrapper[4689]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Jan 23 12:14:34 crc kubenswrapper[4689]: [-]shutdown failed: reason withheld Jan 23 12:14:34 crc kubenswrapper[4689]: readyz check failed Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.326542 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.473626 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.473678 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.474095 4689 patch_prober.go:28] interesting pod/downloads-7954f5f757-hc5js container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 12:14:34 crc kubenswrapper[4689]: I0123 12:14:34.474290 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-hc5js" podUID="fa9894a6-c179-4b45-a036-b94c23125162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 12:14:35 crc kubenswrapper[4689]: I0123 12:14:35.650697 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:14:35 crc kubenswrapper[4689]: E0123 12:14:35.651140 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:14:36 crc kubenswrapper[4689]: I0123 12:14:36.035347 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-l5n7d" podUID="8359ad74-2a40-4f5f-afe6-880a3f0a990e" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:36 crc kubenswrapper[4689]: I0123 12:14:36.172279 4689 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-99t57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:36 crc kubenswrapper[4689]: I0123 
12:14:36.172336 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-99t57" podUID="3377dc50-b5b0-40d0-9b16-295713320fcd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:36 crc kubenswrapper[4689]: I0123 12:14:36.206871 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-pjr88" Jan 23 12:14:36 crc kubenswrapper[4689]: I0123 12:14:36.229246 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.229315 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.229757 4689 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zg96c container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.229831 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zg96c" podUID="6105379b-1fb8-4384-b6d5-67b4db5498e5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.329391 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.329459 4689 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-wx978 container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.329477 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.329507 4689 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wx978" podUID="a8f4f0c7-61db-4423-8f3a-229e4ac94951" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.581317 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-d9bfx" podUID="d92e2c5f-df9d-44e5-839c-806799a650a4" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:37 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:37 crc kubenswrapper[4689]: > Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.798531 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.798606 4689 patch_prober.go:28] interesting pod/controller-manager-6589dc88cb-62qls container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.798642 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:36.798702 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-6589dc88cb-62qls" podUID="9c5d05df-7a1c-4c0c-b791-cc8e932d2560" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:37.462445 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:37.462500 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:37.493500 4689 patch_prober.go:28] interesting pod/oauth-openshift-77df6bdc9c-zhjr5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:14:37 crc kubenswrapper[4689]: 
I0123 12:14:37.493645 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-77df6bdc9c-zhjr5" podUID="5e81a398-514c-4bfa-9038-7ede14a02743" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:14:37 crc kubenswrapper[4689]: I0123 12:14:37.744027 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.756465137s: [/var/lib/containers/storage/overlay/04d9d269390d193ae890cc85c57e95508b6bbe02315963c00528915f40755a8a/diff /var/log/pods/metallb-system_metallb-operator-webhook-server-64d6f55f49-snsq8_6087eb3b-66c0-4d14-a5de-008f086a59ee/webhook-server/1.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.364740 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.365397 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" containerID="cri-o://6098162babea8cfd4c7f55b669eee4ce04f5786e57f9d4def7f83ed691caddfb" gracePeriod=30 Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.365436 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" containerID="cri-o://c373abcc771c979cf5a24cc5f09bb29da6f8f40ac65b6320f1cbfd5fc2a215bc" gracePeriod=30 Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.365511 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="sg-core" containerID="cri-o://8fad4b14ea5ad828a0f6899db108864769ee46a1ca035e7b257cab386013d812" gracePeriod=30 Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.365436 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="proxy-httpd" containerID="cri-o://b8cabd2f0fdc8c5c8d7cea421616ca14c9408a9cb864048d613d4366d22650ff" gracePeriod=30 Jan 23 12:14:38 crc kubenswrapper[4689]: I0123 12:14:38.502954 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.320717 4689 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-w4md5 container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.321042 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" podUID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.657915 4689 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:39 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:39 crc kubenswrapper[4689]: > Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834601 4689 generic.go:334] "Generic (PLEG): container finished" podID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerID="c373abcc771c979cf5a24cc5f09bb29da6f8f40ac65b6320f1cbfd5fc2a215bc" exitCode=0 Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834651 4689 generic.go:334] "Generic (PLEG): container finished" podID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerID="b8cabd2f0fdc8c5c8d7cea421616ca14c9408a9cb864048d613d4366d22650ff" exitCode=0 Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834667 4689 generic.go:334] "Generic (PLEG): container finished" podID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerID="8fad4b14ea5ad828a0f6899db108864769ee46a1ca035e7b257cab386013d812" exitCode=2 Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834669 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"c373abcc771c979cf5a24cc5f09bb29da6f8f40ac65b6320f1cbfd5fc2a215bc"} Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834725 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"b8cabd2f0fdc8c5c8d7cea421616ca14c9408a9cb864048d613d4366d22650ff"} Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834735 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"8fad4b14ea5ad828a0f6899db108864769ee46a1ca035e7b257cab386013d812"} Jan 23 12:14:39 crc kubenswrapper[4689]: I0123 12:14:39.834753 4689 scope.go:117] "RemoveContainer" containerID="9c32f1149eaa11f33916981fc8fd2ba53b5fc5ff3ce5418895c08eb8a0538fef" Jan 23 12:14:40 crc kubenswrapper[4689]: I0123 12:14:40.174467 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:40 crc kubenswrapper[4689]: I0123 12:14:40.175782 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:40 crc kubenswrapper[4689]: I0123 12:14:40.325486 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.1.15:3000/\": dial tcp 10.217.1.15:3000: connect: connection refused" Jan 23 12:14:40 crc kubenswrapper[4689]: I0123 12:14:40.610388 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7db46dddd6-txhvk" Jan 23 12:14:41 crc kubenswrapper[4689]: I0123 12:14:41.047391 4689 generic.go:334] "Generic (PLEG): container finished" podID="9e8ec1f3-5f7f-4150-82e2-34b2f4910385" containerID="ae53b5bf555f0f34d69a4028ac38c5e675df395b21c5bd362fa18c339f207ede" exitCode=0 Jan 23 12:14:41 crc kubenswrapper[4689]: I0123 12:14:41.049649 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" 
event={"ID":"9e8ec1f3-5f7f-4150-82e2-34b2f4910385","Type":"ContainerDied","Data":"ae53b5bf555f0f34d69a4028ac38c5e675df395b21c5bd362fa18c339f207ede"} Jan 23 12:14:41 crc kubenswrapper[4689]: I0123 12:14:41.049685 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" event={"ID":"9e8ec1f3-5f7f-4150-82e2-34b2f4910385","Type":"ContainerStarted","Data":"4e081561acfaad153bb1877cdb8cbfc99df76e55927f02ab68fadc0d810e9956"} Jan 23 12:14:41 crc kubenswrapper[4689]: I0123 12:14:41.273979 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-xls2d" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:41 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:41 crc kubenswrapper[4689]: > Jan 23 12:14:41 crc kubenswrapper[4689]: I0123 12:14:41.382779 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4hdbj" podUID="b7c32de2-03fb-4b12-8fdf-69161c24eed2" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:41 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:41 crc kubenswrapper[4689]: > Jan 23 12:14:42 crc kubenswrapper[4689]: I0123 12:14:42.279686 4689 generic.go:334] "Generic (PLEG): container finished" podID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerID="b859dce2528167c415e6815b84ddb660609b01e841a0018b5c3d844432f26392" exitCode=1 Jan 23 12:14:42 crc kubenswrapper[4689]: I0123 12:14:42.280943 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerDied","Data":"b859dce2528167c415e6815b84ddb660609b01e841a0018b5c3d844432f26392"} Jan 23 12:14:42 crc kubenswrapper[4689]: I0123 12:14:42.281484 4689 scope.go:117] "RemoveContainer" containerID="b859dce2528167c415e6815b84ddb660609b01e841a0018b5c3d844432f26392" Jan 23 12:14:42 crc kubenswrapper[4689]: I0123 12:14:42.282461 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-x87gr" podUID="5fbbf7f9-c268-4a7b-a278-4f72a9099acf" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:42 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:42 crc kubenswrapper[4689]: > Jan 23 12:14:43 crc kubenswrapper[4689]: I0123 12:14:43.130087 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 12:14:43 crc kubenswrapper[4689]: I0123 12:14:43.426101 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 23 12:14:43 crc kubenswrapper[4689]: I0123 12:14:43.536466 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 23 12:14:43 crc kubenswrapper[4689]: I0123 12:14:43.692077 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.111734 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" 
containerName="galera" probeResult="failure" output=< Jan 23 12:14:44 crc kubenswrapper[4689]: wsrep_local_state_comment (Joined) differs from Synced Jan 23 12:14:44 crc kubenswrapper[4689]: > Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.135295 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="9926a3b2-8d65-4876-b56b-488948df1352" containerName="galera" probeResult="failure" output=< Jan 23 12:14:44 crc kubenswrapper[4689]: wsrep_local_state_comment (Joined) differs from Synced Jan 23 12:14:44 crc kubenswrapper[4689]: > Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.317793 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.317848 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.322363 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.332468 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="cab355b0-25b6-4ad4-83ad-718ae756ae29" containerName="galera" probeResult="failure" output=< Jan 23 12:14:44 crc kubenswrapper[4689]: wsrep_local_state_comment (Joined) differs from Synced Jan 23 12:14:44 crc kubenswrapper[4689]: > Jan 23 12:14:44 crc kubenswrapper[4689]: I0123 12:14:44.484614 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-hc5js" Jan 23 12:14:45 crc kubenswrapper[4689]: I0123 12:14:45.330450 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w4md5" Jan 23 12:14:46 crc kubenswrapper[4689]: I0123 12:14:46.332567 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" event={"ID":"60f6136e-990e-4ca3-88d3-ff00f4db14e7","Type":"ContainerStarted","Data":"3e87ec5df8cdb606ab6435f125d60c131c6cee6dfe4ddd997248358d7a1a82d5"} Jan 23 12:14:46 crc kubenswrapper[4689]: I0123 12:14:46.336804 4689 generic.go:334] "Generic (PLEG): container finished" podID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerID="6098162babea8cfd4c7f55b669eee4ce04f5786e57f9d4def7f83ed691caddfb" exitCode=0 Jan 23 12:14:46 crc kubenswrapper[4689]: I0123 12:14:46.336878 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"6098162babea8cfd4c7f55b669eee4ce04f5786e57f9d4def7f83ed691caddfb"} Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.698582 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.831541 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2hnz\" (UniqueName: \"kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.831619 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.831725 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.831793 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.831822 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.832030 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.832097 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.832180 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data\") pod \"141af3c4-ba78-43ed-af9c-0f98846eb67f\" (UID: \"141af3c4-ba78-43ed-af9c-0f98846eb67f\") " Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.834239 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.836553 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.840930 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz" (OuterVolumeSpecName: "kube-api-access-d2hnz") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "kube-api-access-d2hnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.863665 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts" (OuterVolumeSpecName: "scripts") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.877422 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.935309 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.935350 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2hnz\" (UniqueName: \"kubernetes.io/projected/141af3c4-ba78-43ed-af9c-0f98846eb67f-kube-api-access-d2hnz\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.935363 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.935375 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.935385 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/141af3c4-ba78-43ed-af9c-0f98846eb67f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:47 crc kubenswrapper[4689]: I0123 12:14:47.969411 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.016339 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data" (OuterVolumeSpecName: "config-data") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.037531 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.037573 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.044428 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "141af3c4-ba78-43ed-af9c-0f98846eb67f" (UID: "141af3c4-ba78-43ed-af9c-0f98846eb67f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.139480 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141af3c4-ba78-43ed-af9c-0f98846eb67f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.370718 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.370783 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"141af3c4-ba78-43ed-af9c-0f98846eb67f","Type":"ContainerDied","Data":"fe2d0802d82bbb6e0e47d06154288d4db6fda1d8822ef239cc9340555d0413ee"} Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.371974 4689 scope.go:117] "RemoveContainer" containerID="c373abcc771c979cf5a24cc5f09bb29da6f8f40ac65b6320f1cbfd5fc2a215bc" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.424735 4689 scope.go:117] "RemoveContainer" containerID="b8cabd2f0fdc8c5c8d7cea421616ca14c9408a9cb864048d613d4366d22650ff" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.427813 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="543b7065-0dd2-411e-9854-8aaa3e11dd3e" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.434936 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.451840 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.465681 4689 scope.go:117] "RemoveContainer" containerID="8fad4b14ea5ad828a0f6899db108864769ee46a1ca035e7b257cab386013d812" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.482189 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.492556 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.492598 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.492639 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.492647 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.492669 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="proxy-httpd" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.492674 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="proxy-httpd" Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.492695 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="sg-core" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.492701 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="sg-core" Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.492720 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.492727 4689 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.493102 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.493123 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="proxy-httpd" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.493135 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="sg-core" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.493171 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-notification-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.496483 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" containerName="ceilometer-central-agent" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.501047 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.501169 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.504954 4689 scope.go:117] "RemoveContainer" containerID="6098162babea8cfd4c7f55b669eee4ce04f5786e57f9d4def7f83ed691caddfb" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.506553 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.509895 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.513502 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.640074 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:14:48 crc kubenswrapper[4689]: E0123 12:14:48.640516 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.654949 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655009 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " 
pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655033 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7fsz\" (UniqueName: \"kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655292 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655331 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655439 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655476 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.655521 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.757987 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758041 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758118 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758204 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758235 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758312 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758332 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.758349 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7fsz\" (UniqueName: \"kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.759486 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.759714 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.762870 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.764724 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.764905 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.765464 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.774334 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.779003 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7fsz\" (UniqueName: \"kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz\") pod \"ceilometer-0\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " pod="openstack/ceilometer-0" Jan 23 12:14:48 crc kubenswrapper[4689]: I0123 12:14:48.830021 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:14:49 crc kubenswrapper[4689]: I0123 12:14:49.339104 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:14:49 crc kubenswrapper[4689]: W0123 12:14:49.368765 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4e8267e_4a9c_4cf9_889f_24a9032add94.slice/crio-68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2 WatchSource:0}: Error finding container 68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2: Status 404 returned error can't find the container with id 68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2 Jan 23 12:14:49 crc kubenswrapper[4689]: I0123 12:14:49.613974 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:49 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:49 crc kubenswrapper[4689]: > Jan 23 12:14:49 crc kubenswrapper[4689]: I0123 12:14:49.650763 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="141af3c4-ba78-43ed-af9c-0f98846eb67f" path="/var/lib/kubelet/pods/141af3c4-ba78-43ed-af9c-0f98846eb67f/volumes" Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.263937 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.342380 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.393911 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.404403 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerStarted","Data":"68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2"} Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.449710 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4hdbj" Jan 23 12:14:50 crc kubenswrapper[4689]: I0123 12:14:50.960019 4689 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 12:14:51 crc kubenswrapper[4689]: I0123 12:14:51.007225 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x87gr" Jan 23 12:14:51 crc kubenswrapper[4689]: I0123 12:14:51.415643 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerStarted","Data":"5c3f4e709eda03d440452933b9b47f31c55ecf66a62a05e81dd0c5a80f67a080"} Jan 23 12:14:52 crc kubenswrapper[4689]: I0123 12:14:52.774675 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 23 12:14:53 crc kubenswrapper[4689]: I0123 12:14:53.437334 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerStarted","Data":"c8c941db98c408fe90bb837ad6ce281735172be70bbb26b5af94eb6cd9c72443"} Jan 23 12:14:53 crc kubenswrapper[4689]: I0123 12:14:53.437670 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerStarted","Data":"d519166f00bcfc9c745f45cb04b29c770ba622da1eb7e5aa29c6c52c93f2aa47"} Jan 23 12:14:53 crc kubenswrapper[4689]: I0123 12:14:53.441581 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 23 12:14:53 crc kubenswrapper[4689]: I0123 12:14:53.716221 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"] Jan 23 12:14:53 crc kubenswrapper[4689]: I0123 12:14:53.716456 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xls2d" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="registry-server" containerID="cri-o://94a962a1dbc6a2d7d5689506f873aca79bcf8ff7cf2891d52ce900d8e84b4a50" gracePeriod=2 Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.336110 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.463082 4689 generic.go:334] "Generic (PLEG): container finished" podID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerID="94a962a1dbc6a2d7d5689506f873aca79bcf8ff7cf2891d52ce900d8e84b4a50" exitCode=0 Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.463530 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerDied","Data":"94a962a1dbc6a2d7d5689506f873aca79bcf8ff7cf2891d52ce900d8e84b4a50"} Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.463566 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xls2d" event={"ID":"141ce67f-c500-4ace-918e-45b8e5bce2ba","Type":"ContainerDied","Data":"009edde855c59c75fbed9e20ab40153db72a2aafc883c7a44306e0da10c8e082"} Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.463583 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="009edde855c59c75fbed9e20ab40153db72a2aafc883c7a44306e0da10c8e082" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.615754 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.738818 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities\") pod \"141ce67f-c500-4ace-918e-45b8e5bce2ba\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.738937 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content\") pod \"141ce67f-c500-4ace-918e-45b8e5bce2ba\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.738989 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgs9k\" (UniqueName: \"kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k\") pod \"141ce67f-c500-4ace-918e-45b8e5bce2ba\" (UID: \"141ce67f-c500-4ace-918e-45b8e5bce2ba\") " Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.739486 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities" (OuterVolumeSpecName: "utilities") pod "141ce67f-c500-4ace-918e-45b8e5bce2ba" (UID: "141ce67f-c500-4ace-918e-45b8e5bce2ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.740180 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.763507 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "141ce67f-c500-4ace-918e-45b8e5bce2ba" (UID: "141ce67f-c500-4ace-918e-45b8e5bce2ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.766517 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k" (OuterVolumeSpecName: "kube-api-access-dgs9k") pod "141ce67f-c500-4ace-918e-45b8e5bce2ba" (UID: "141ce67f-c500-4ace-918e-45b8e5bce2ba"). InnerVolumeSpecName "kube-api-access-dgs9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.843092 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141ce67f-c500-4ace-918e-45b8e5bce2ba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:54 crc kubenswrapper[4689]: I0123 12:14:54.843134 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgs9k\" (UniqueName: \"kubernetes.io/projected/141ce67f-c500-4ace-918e-45b8e5bce2ba-kube-api-access-dgs9k\") on node \"crc\" DevicePath \"\"" Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.477522 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xls2d" Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.477509 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerStarted","Data":"feca70f03cf25b83fb1ba69fbdee96df486f4ea1c819d0c483a67b0e93831640"} Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.477792 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.531236 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.581549612 podStartE2EDuration="7.531211763s" podCreationTimestamp="2026-01-23 12:14:48 +0000 UTC" firstStartedPulling="2026-01-23 12:14:49.38596016 +0000 UTC m=+5154.010640009" lastFinishedPulling="2026-01-23 12:14:54.335622301 +0000 UTC m=+5158.960302160" observedRunningTime="2026-01-23 12:14:55.518918723 +0000 UTC m=+5160.143598582" watchObservedRunningTime="2026-01-23 12:14:55.531211763 +0000 UTC m=+5160.155891622" Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.552353 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"] Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.564980 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xls2d"] Jan 23 12:14:55 crc kubenswrapper[4689]: I0123 12:14:55.654396 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" path="/var/lib/kubelet/pods/141ce67f-c500-4ace-918e-45b8e5bce2ba/volumes" Jan 23 12:14:59 crc kubenswrapper[4689]: I0123 12:14:59.574783 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4zskl" podUID="0cb158e3-50d7-4750-8f95-c22d0a94a70f" containerName="registry-server" probeResult="failure" output=< Jan 23 12:14:59 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:14:59 crc kubenswrapper[4689]: > Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.208712 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"] Jan 23 12:15:00 crc kubenswrapper[4689]: E0123 12:15:00.209680 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="extract-content" Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.209707 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="extract-content" Jan 23 12:15:00 crc kubenswrapper[4689]: E0123 12:15:00.209724 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="registry-server" Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.209732 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="registry-server" Jan 23 12:15:00 crc kubenswrapper[4689]: E0123 12:15:00.209750 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="extract-utilities" Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.209758 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="extract-utilities" Jan 23 
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.210040 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="141ce67f-c500-4ace-918e-45b8e5bce2ba" containerName="registry-server"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.211819 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.217549 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.217893 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.226139 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"]
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.284460 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.284705 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.285074 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szmmj\" (UniqueName: \"kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.387222 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szmmj\" (UniqueName: \"kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.387739 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.387936 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.389239 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.405965 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.415336 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szmmj\" (UniqueName: \"kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj\") pod \"collect-profiles-29486175-5zv99\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:00 crc kubenswrapper[4689]: I0123 12:15:00.548951 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"
Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.271526 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99"]
Jan 23 12:15:01 crc kubenswrapper[4689]: W0123 12:15:01.277042 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbda30271_ff43_4655_8f7e_27e7dc659eaa.slice/crio-f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1 WatchSource:0}: Error finding container f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1: Status 404 returned error can't find the container with id f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1
Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.453913 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.455948 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.459810 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tpqqr" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.484375 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.533707 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.534200 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt7wm\" (UniqueName: \"kubernetes.io/projected/5c12c927-06bf-44b8-bf6f-724ecb1e431a-kube-api-access-tt7wm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.582920 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99" event={"ID":"bda30271-ff43-4655-8f7e-27e7dc659eaa","Type":"ContainerStarted","Data":"f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1"} Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.639065 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt7wm\" (UniqueName: \"kubernetes.io/projected/5c12c927-06bf-44b8-bf6f-724ecb1e431a-kube-api-access-tt7wm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.639489 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.640552 4689 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.642354 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:15:01 crc kubenswrapper[4689]: E0123 12:15:01.642849 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.683051 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt7wm\" (UniqueName: \"kubernetes.io/projected/5c12c927-06bf-44b8-bf6f-724ecb1e431a-kube-api-access-tt7wm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.692580 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5c12c927-06bf-44b8-bf6f-724ecb1e431a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:01 crc kubenswrapper[4689]: I0123 12:15:01.844455 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.595386 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.631339 4689 generic.go:334] "Generic (PLEG): container finished" podID="bda30271-ff43-4655-8f7e-27e7dc659eaa" containerID="f4d4953de09e58f08f12e3673e0f35ee7ea7f60757b6a22c26af104f50339df5" exitCode=0 Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.632036 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99" event={"ID":"bda30271-ff43-4655-8f7e-27e7dc659eaa","Type":"ContainerDied","Data":"f4d4953de09e58f08f12e3673e0f35ee7ea7f60757b6a22c26af104f50339df5"} Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.701741 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.702358 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-central-agent" containerID="cri-o://5c3f4e709eda03d440452933b9b47f31c55ecf66a62a05e81dd0c5a80f67a080" gracePeriod=30 Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.703069 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="proxy-httpd" containerID="cri-o://feca70f03cf25b83fb1ba69fbdee96df486f4ea1c819d0c483a67b0e93831640" gracePeriod=30 Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.703619 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="sg-core" containerID="cri-o://c8c941db98c408fe90bb837ad6ce281735172be70bbb26b5af94eb6cd9c72443" gracePeriod=30 Jan 23 12:15:02 crc kubenswrapper[4689]: I0123 12:15:02.703647 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-notification-agent" 
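
local-storage06-crc above mounts through a different path than the secret volumes: MountVolume.MountDevice first reports the PersistentVolume's device mount path /mnt/openstack/pv06, and MountVolume.SetUp then exposes it inside the pod (for local volumes this is a bind mount from that global path). A sketch of the PV shape this implies, assuming the kubernetes.io/local-volume plugin name corresponds to a Local PV source; capacity, node affinity, and the PVC binding are omitted because they are not recoverable from the log:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        pv := corev1.PersistentVolume{
            Spec: corev1.PersistentVolumeSpec{
                PersistentVolumeSource: corev1.PersistentVolumeSource{
                    // Path as logged by MountVolume.MountDevice; the PV object
                    // itself (name local-storage06-crc) never appears in this log.
                    Local: &corev1.LocalVolumeSource{Path: "/mnt/openstack/pv06"},
                },
            },
        }
        fmt.Println(pv.Spec.Local.Path)
    }
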
containerID="cri-o://d519166f00bcfc9c745f45cb04b29c770ba622da1eb7e5aa29c6c52c93f2aa47" gracePeriod=30 Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.724662 4689 generic.go:334] "Generic (PLEG): container finished" podID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerID="feca70f03cf25b83fb1ba69fbdee96df486f4ea1c819d0c483a67b0e93831640" exitCode=0 Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.724924 4689 generic.go:334] "Generic (PLEG): container finished" podID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerID="c8c941db98c408fe90bb837ad6ce281735172be70bbb26b5af94eb6cd9c72443" exitCode=2 Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.724932 4689 generic.go:334] "Generic (PLEG): container finished" podID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerID="d519166f00bcfc9c745f45cb04b29c770ba622da1eb7e5aa29c6c52c93f2aa47" exitCode=0 Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.729693 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerDied","Data":"feca70f03cf25b83fb1ba69fbdee96df486f4ea1c819d0c483a67b0e93831640"} Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.729744 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerDied","Data":"c8c941db98c408fe90bb837ad6ce281735172be70bbb26b5af94eb6cd9c72443"} Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.729757 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerDied","Data":"d519166f00bcfc9c745f45cb04b29c770ba622da1eb7e5aa29c6c52c93f2aa47"} Jan 23 12:15:03 crc kubenswrapper[4689]: I0123 12:15:03.739996 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5c12c927-06bf-44b8-bf6f-724ecb1e431a","Type":"ContainerStarted","Data":"8655090d37f790e36652a3b14112f867297b1159eecd8ce231339ed4b8affbe8"} Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.454255 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.562684 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szmmj\" (UniqueName: \"kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj\") pod \"bda30271-ff43-4655-8f7e-27e7dc659eaa\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.562926 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume\") pod \"bda30271-ff43-4655-8f7e-27e7dc659eaa\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.562975 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume\") pod \"bda30271-ff43-4655-8f7e-27e7dc659eaa\" (UID: \"bda30271-ff43-4655-8f7e-27e7dc659eaa\") " Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.565794 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume" (OuterVolumeSpecName: "config-volume") pod "bda30271-ff43-4655-8f7e-27e7dc659eaa" (UID: "bda30271-ff43-4655-8f7e-27e7dc659eaa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.573415 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj" (OuterVolumeSpecName: "kube-api-access-szmmj") pod "bda30271-ff43-4655-8f7e-27e7dc659eaa" (UID: "bda30271-ff43-4655-8f7e-27e7dc659eaa"). InnerVolumeSpecName "kube-api-access-szmmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.590286 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bda30271-ff43-4655-8f7e-27e7dc659eaa" (UID: "bda30271-ff43-4655-8f7e-27e7dc659eaa"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.666770 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szmmj\" (UniqueName: \"kubernetes.io/projected/bda30271-ff43-4655-8f7e-27e7dc659eaa-kube-api-access-szmmj\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.666809 4689 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bda30271-ff43-4655-8f7e-27e7dc659eaa-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.666819 4689 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bda30271-ff43-4655-8f7e-27e7dc659eaa-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.754475 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5c12c927-06bf-44b8-bf6f-724ecb1e431a","Type":"ContainerStarted","Data":"aeebc3d85c9e6e3646bc9415016d97a47ace0bcd9c8791971170555df3c6c1b0"} Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.756972 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99" event={"ID":"bda30271-ff43-4655-8f7e-27e7dc659eaa","Type":"ContainerDied","Data":"f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1"} Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.757008 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f26966032547d2eed86188d8b2a4fb4168ff35a0724041186f641a5e7d5cb1e1" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.757058 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29486175-5zv99" Jan 23 12:15:04 crc kubenswrapper[4689]: I0123 12:15:04.782042 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.170783003 podStartE2EDuration="3.782019513s" podCreationTimestamp="2026-01-23 12:15:01 +0000 UTC" firstStartedPulling="2026-01-23 12:15:02.615806742 +0000 UTC m=+5167.240486601" lastFinishedPulling="2026-01-23 12:15:04.227043252 +0000 UTC m=+5168.851723111" observedRunningTime="2026-01-23 12:15:04.771229689 +0000 UTC m=+5169.395909558" watchObservedRunningTime="2026-01-23 12:15:04.782019513 +0000 UTC m=+5169.406699372" Jan 23 12:15:05 crc kubenswrapper[4689]: I0123 12:15:05.550335 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg"] Jan 23 12:15:05 crc kubenswrapper[4689]: I0123 12:15:05.610834 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29486130-hbqgg"] Jan 23 12:15:05 crc kubenswrapper[4689]: I0123 12:15:05.665543 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1976a333-0b40-42b5-af0b-f817b46cbbc3" path="/var/lib/kubelet/pods/1976a333-0b40-42b5-af0b-f817b46cbbc3/volumes" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.794914 4689 generic.go:334] "Generic (PLEG): container finished" podID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerID="5c3f4e709eda03d440452933b9b47f31c55ecf66a62a05e81dd0c5a80f67a080" exitCode=0 Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.795084 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerDied","Data":"5c3f4e709eda03d440452933b9b47f31c55ecf66a62a05e81dd0c5a80f67a080"} Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.795452 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4e8267e-4a9c-4cf9-889f-24a9032add94","Type":"ContainerDied","Data":"68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2"} Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.795468 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68d0829179e432461c60a518cca07f4b56b621bb0bc905c4ba91d3a5447fd0f2" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.817194 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.970848 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.971588 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.971664 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.971699 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.971843 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.971941 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.972023 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.972111 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7fsz\" (UniqueName: \"kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz\") pod \"e4e8267e-4a9c-4cf9-889f-24a9032add94\" (UID: \"e4e8267e-4a9c-4cf9-889f-24a9032add94\") " Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.973791 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.973936 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.978842 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts" (OuterVolumeSpecName: "scripts") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:07 crc kubenswrapper[4689]: I0123 12:15:07.984886 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz" (OuterVolumeSpecName: "kube-api-access-v7fsz") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "kube-api-access-v7fsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.027529 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.063309 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074828 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7fsz\" (UniqueName: \"kubernetes.io/projected/e4e8267e-4a9c-4cf9-889f-24a9032add94-kube-api-access-v7fsz\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074864 4689 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074874 4689 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074882 4689 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4e8267e-4a9c-4cf9-889f-24a9032add94-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074890 4689 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.074901 4689 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.093906 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.125305 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data" (OuterVolumeSpecName: "config-data") pod "e4e8267e-4a9c-4cf9-889f-24a9032add94" (UID: "e4e8267e-4a9c-4cf9-889f-24a9032add94"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.177843 4689 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.177888 4689 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4e8267e-4a9c-4cf9-889f-24a9032add94-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.571605 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.627428 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4zskl" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.815674 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.879289 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.893602 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.936630 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:08 crc kubenswrapper[4689]: E0123 12:15:08.937294 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="proxy-httpd" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937320 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="proxy-httpd" Jan 23 12:15:08 crc kubenswrapper[4689]: E0123 12:15:08.937371 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-central-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937390 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-central-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: E0123 12:15:08.937399 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="sg-core" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937408 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="sg-core" Jan 23 12:15:08 crc kubenswrapper[4689]: E0123 12:15:08.937437 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda30271-ff43-4655-8f7e-27e7dc659eaa" containerName="collect-profiles" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937446 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda30271-ff43-4655-8f7e-27e7dc659eaa" containerName="collect-profiles" Jan 23 12:15:08 crc kubenswrapper[4689]: E0123 12:15:08.937464 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-notification-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937481 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-notification-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937794 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-notification-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937819 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="bda30271-ff43-4655-8f7e-27e7dc659eaa" containerName="collect-profiles" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937831 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="sg-core" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937855 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="ceilometer-central-agent" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.937866 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" containerName="proxy-httpd" Jan 23 12:15:08 crc 
kubenswrapper[4689]: I0123 12:15:08.940806 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.947183 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.947428 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.955581 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 12:15:08 crc kubenswrapper[4689]: I0123 12:15:08.968325 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017393 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-config-data\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017478 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017540 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-scripts\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017569 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017699 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-log-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.017812 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-run-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.021405 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.021463 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-gqppk\" (UniqueName: \"kubernetes.io/projected/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-kube-api-access-gqppk\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.123885 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.123932 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-scripts\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.123950 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.123981 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-log-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.124026 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-run-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.124199 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.124217 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqppk\" (UniqueName: \"kubernetes.io/projected/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-kube-api-access-gqppk\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.124274 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-config-data\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.125344 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-log-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.125813 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-run-httpd\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.130858 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.131038 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-config-data\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.130862 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.133011 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-scripts\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.142986 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.146963 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqppk\" (UniqueName: \"kubernetes.io/projected/155cdbe2-316b-4eeb-a1a5-3e59dddda4d1-kube-api-access-gqppk\") pod \"ceilometer-0\" (UID: \"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1\") " pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.265311 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.666756 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4e8267e-4a9c-4cf9-889f-24a9032add94" path="/var/lib/kubelet/pods/e4e8267e-4a9c-4cf9-889f-24a9032add94/volumes" Jan 23 12:15:09 crc kubenswrapper[4689]: I0123 12:15:09.926670 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 12:15:10 crc kubenswrapper[4689]: I0123 12:15:10.846728 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1","Type":"ContainerStarted","Data":"f509fa74dd69972e4ef1149d3caa3b62da5be53b86b340349b0a605cf8d9d01a"} Jan 23 12:15:10 crc kubenswrapper[4689]: I0123 12:15:10.847245 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1","Type":"ContainerStarted","Data":"43378d6ac90894b233892f4dfa6d68805f2690c6dc15ad028072e0b9447633f2"} Jan 23 12:15:11 crc kubenswrapper[4689]: I0123 12:15:11.866695 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1","Type":"ContainerStarted","Data":"e0192ffb21abfaa9b2c6c7eb4fa8457ed0aa0b54e0536bfb8e7b6268bd0d81ef"} Jan 23 12:15:11 crc kubenswrapper[4689]: I0123 12:15:11.867225 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1","Type":"ContainerStarted","Data":"9bceb26146669085f3c3b53bdcd983843f69696f778f549fbbb468a67175287d"} Jan 23 12:15:12 crc kubenswrapper[4689]: I0123 12:15:12.640802 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:15:12 crc kubenswrapper[4689]: E0123 12:15:12.642043 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:15:13 crc kubenswrapper[4689]: I0123 12:15:13.893049 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"155cdbe2-316b-4eeb-a1a5-3e59dddda4d1","Type":"ContainerStarted","Data":"5aca242919a8c7884d256ebd1f55ab352129ee20fe05337b1e6324622de8de1d"} Jan 23 12:15:13 crc kubenswrapper[4689]: I0123 12:15:13.893407 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 12:15:13 crc kubenswrapper[4689]: I0123 12:15:13.926911 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.851843314 podStartE2EDuration="5.926891276s" podCreationTimestamp="2026-01-23 12:15:08 +0000 UTC" firstStartedPulling="2026-01-23 12:15:09.903372806 +0000 UTC m=+5174.528052655" lastFinishedPulling="2026-01-23 12:15:12.978420758 +0000 UTC m=+5177.603100617" observedRunningTime="2026-01-23 12:15:13.913403286 +0000 UTC m=+5178.538083145" watchObservedRunningTime="2026-01-23 12:15:13.926891276 +0000 UTC m=+5178.551571135" Jan 23 12:15:27 crc kubenswrapper[4689]: I0123 12:15:27.640844 4689 scope.go:117] "RemoveContainer" 
containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:15:27 crc kubenswrapper[4689]: E0123 12:15:27.641517 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:15:31 crc kubenswrapper[4689]: I0123 12:15:31.899427 4689 scope.go:117] "RemoveContainer" containerID="3bd8f63a66c771a5479f7167a52d15bfef12eeccaf2dcf8b53ca21f361d00fe4" Jan 23 12:15:39 crc kubenswrapper[4689]: I0123 12:15:39.292733 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 23 12:15:40 crc kubenswrapper[4689]: I0123 12:15:40.640527 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:15:40 crc kubenswrapper[4689]: E0123 12:15:40.641176 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.146201 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rr6h7/must-gather-xszvc"] Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.151007 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.153589 4689 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rr6h7"/"default-dockercfg-n29mp" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.153872 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rr6h7"/"openshift-service-ca.crt" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.153970 4689 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rr6h7"/"kube-root-ca.crt" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.170875 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rr6h7/must-gather-xszvc"] Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.225787 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n9sp\" (UniqueName: \"kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.226036 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.328204 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.328633 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n9sp\" (UniqueName: \"kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.329251 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.353555 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n9sp\" (UniqueName: \"kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp\") pod \"must-gather-xszvc\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:44 crc kubenswrapper[4689]: I0123 12:15:44.486259 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:15:45 crc kubenswrapper[4689]: I0123 12:15:45.038378 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rr6h7/must-gather-xszvc"] Jan 23 12:15:45 crc kubenswrapper[4689]: I0123 12:15:45.041885 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 12:15:45 crc kubenswrapper[4689]: I0123 12:15:45.289710 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/must-gather-xszvc" event={"ID":"2b1985c0-6892-4629-9f40-fe58155f22df","Type":"ContainerStarted","Data":"e90b351fb830e68d5696ef04ef8abf536b6d983f430b011a4db1640c712e7c25"} Jan 23 12:15:50 crc kubenswrapper[4689]: I0123 12:15:50.416486 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.110153505s: [/var/lib/containers/storage/overlay/5f7ef939e0e5f5cf9741469ffb2a2b7e608d359271372cbeac48809be14ff336/diff /var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/prometheus/1.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:15:50 crc kubenswrapper[4689]: I0123 12:15:50.417398 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.070715773s: [/var/lib/containers/storage/overlay/e1fc1ba01594be641af174c34bb76a8802faf3316c703110156f7aa2cf39bbdd/diff /var/log/pods/openstack_nova-scheduler-0_6ba90fb7-d664-4eb6-90db-4ad3909ebfbf/nova-scheduler-scheduler/0.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:15:52 crc kubenswrapper[4689]: I0123 12:15:52.386558 4689 generic.go:334] "Generic (PLEG): container finished" podID="c09648d1-cecf-420d-8b1c-226eb880a7a3" containerID="38aa0cd5f5288400c6515cf6a9235754c9a90fa712fa7bcc350799d57840292e" exitCode=0 Jan 23 12:15:52 crc kubenswrapper[4689]: I0123 12:15:52.386567 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" event={"ID":"c09648d1-cecf-420d-8b1c-226eb880a7a3","Type":"ContainerDied","Data":"38aa0cd5f5288400c6515cf6a9235754c9a90fa712fa7bcc350799d57840292e"} Jan 23 12:15:54 crc kubenswrapper[4689]: I0123 12:15:54.641352 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:15:54 crc kubenswrapper[4689]: E0123 12:15:54.642184 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:15:57 crc kubenswrapper[4689]: I0123 12:15:57.466519 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" event={"ID":"c09648d1-cecf-420d-8b1c-226eb880a7a3","Type":"ContainerStarted","Data":"7ed9336549fdb3648266617b7ca248ed493f0c7558b12ca73bf19cbca2071fac"} Jan 23 12:15:57 crc kubenswrapper[4689]: I0123 12:15:57.472897 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/must-gather-xszvc" event={"ID":"2b1985c0-6892-4629-9f40-fe58155f22df","Type":"ContainerStarted","Data":"c00b578b8aa59bc83b6fe1fe6fae5821747f9536ef6a2b221fe0ab93504f6416"} Jan 23 12:15:57 crc kubenswrapper[4689]: 
I0123 12:15:57.472941 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/must-gather-xszvc" event={"ID":"2b1985c0-6892-4629-9f40-fe58155f22df","Type":"ContainerStarted","Data":"280f6fa62adee5a95242d4bfdb303233af7761812447d940872c877f63f93b24"} Jan 23 12:15:57 crc kubenswrapper[4689]: I0123 12:15:57.547922 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rr6h7/must-gather-xszvc" podStartSLOduration=2.236431743 podStartE2EDuration="13.547898845s" podCreationTimestamp="2026-01-23 12:15:44 +0000 UTC" firstStartedPulling="2026-01-23 12:15:45.040198485 +0000 UTC m=+5209.664878344" lastFinishedPulling="2026-01-23 12:15:56.351665597 +0000 UTC m=+5220.976345446" observedRunningTime="2026-01-23 12:15:57.518282182 +0000 UTC m=+5222.142962051" watchObservedRunningTime="2026-01-23 12:15:57.547898845 +0000 UTC m=+5222.172578704" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.410692 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6jv2w"] Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.415297 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.583499 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-698cz\" (UniqueName: \"kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.584017 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.686656 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.687240 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.687396 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-698cz\" (UniqueName: \"kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.711857 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-698cz\" (UniqueName: \"kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz\") pod \"crc-debug-6jv2w\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " 
pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:02 crc kubenswrapper[4689]: I0123 12:16:02.748719 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:16:03 crc kubenswrapper[4689]: I0123 12:16:03.554896 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" event={"ID":"f759b6a5-02d4-4851-a379-6c2242094384","Type":"ContainerStarted","Data":"1e0aeb6c1fed5f602c897ca3a5e45b6833df0ae81bb6df5528d917b5ab99b8c8"} Jan 23 12:16:06 crc kubenswrapper[4689]: I0123 12:16:06.640249 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51" Jan 23 12:16:07 crc kubenswrapper[4689]: I0123 12:16:07.620818 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480"} Jan 23 12:16:09 crc kubenswrapper[4689]: I0123 12:16:09.872702 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 12:16:09 crc kubenswrapper[4689]: I0123 12:16:09.874465 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 12:16:16 crc kubenswrapper[4689]: I0123 12:16:16.738803 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" event={"ID":"f759b6a5-02d4-4851-a379-6c2242094384","Type":"ContainerStarted","Data":"e04808975d377142b9f4ef73cdc9e77ec75098a2c422e97872d1e19fcba0ca9e"} Jan 23 12:16:16 crc kubenswrapper[4689]: I0123 12:16:16.761658 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" podStartSLOduration=1.577267 podStartE2EDuration="14.761633882s" podCreationTimestamp="2026-01-23 12:16:02 +0000 UTC" firstStartedPulling="2026-01-23 12:16:02.865724856 +0000 UTC m=+5227.490404715" lastFinishedPulling="2026-01-23 12:16:16.050091738 +0000 UTC m=+5240.674771597" observedRunningTime="2026-01-23 12:16:16.749972877 +0000 UTC m=+5241.374652736" watchObservedRunningTime="2026-01-23 12:16:16.761633882 +0000 UTC m=+5241.386313741" Jan 23 12:16:29 crc kubenswrapper[4689]: I0123 12:16:29.879705 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 12:16:29 crc kubenswrapper[4689]: I0123 12:16:29.883423 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-56b6c6f75d-w9wz2" Jan 23 12:16:50 crc kubenswrapper[4689]: I0123 12:16:50.277290 4689 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.031857524s: [/var/lib/containers/storage/overlay/90439ca9d295bf5fbbdb8f32790868d02808e7d0cf8767ff373e515a572da475/diff /var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-22rdn_54403d19-67da-4783-8b45-b7070bc15424/manager/1.log]; will not log again for this container unless duration exceeds 2s Jan 23 12:16:50 crc kubenswrapper[4689]: I0123 12:16:50.279907 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-22rdn" podUID="54403d19-67da-4783-8b45-b7070bc15424" containerName="manager" 
probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:16:50 crc kubenswrapper[4689]: I0123 12:16:50.289781 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-rt7xn" podUID="a9f05c03-72c2-4906-b327-df50d5922d28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:16:50 crc kubenswrapper[4689]: I0123 12:16:50.312566 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-xhgxj" podUID="60f6136e-990e-4ca3-88d3-ff00f4db14e7" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 12:17:07 crc kubenswrapper[4689]: I0123 12:17:07.665851 4689 generic.go:334] "Generic (PLEG): container finished" podID="f759b6a5-02d4-4851-a379-6c2242094384" containerID="e04808975d377142b9f4ef73cdc9e77ec75098a2c422e97872d1e19fcba0ca9e" exitCode=0 Jan 23 12:17:07 crc kubenswrapper[4689]: I0123 12:17:07.665992 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" event={"ID":"f759b6a5-02d4-4851-a379-6c2242094384","Type":"ContainerDied","Data":"e04808975d377142b9f4ef73cdc9e77ec75098a2c422e97872d1e19fcba0ca9e"} Jan 23 12:17:08 crc kubenswrapper[4689]: I0123 12:17:08.822209 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:17:08 crc kubenswrapper[4689]: I0123 12:17:08.877133 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6jv2w"] Jan 23 12:17:08 crc kubenswrapper[4689]: I0123 12:17:08.894681 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6jv2w"] Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.017115 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host\") pod \"f759b6a5-02d4-4851-a379-6c2242094384\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.017284 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host" (OuterVolumeSpecName: "host") pod "f759b6a5-02d4-4851-a379-6c2242094384" (UID: "f759b6a5-02d4-4851-a379-6c2242094384"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.017563 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-698cz\" (UniqueName: \"kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz\") pod \"f759b6a5-02d4-4851-a379-6c2242094384\" (UID: \"f759b6a5-02d4-4851-a379-6c2242094384\") " Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.018951 4689 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f759b6a5-02d4-4851-a379-6c2242094384-host\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.026612 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz" (OuterVolumeSpecName: "kube-api-access-698cz") pod "f759b6a5-02d4-4851-a379-6c2242094384" (UID: "f759b6a5-02d4-4851-a379-6c2242094384"). InnerVolumeSpecName "kube-api-access-698cz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.121674 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-698cz\" (UniqueName: \"kubernetes.io/projected/f759b6a5-02d4-4851-a379-6c2242094384-kube-api-access-698cz\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.651347 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f759b6a5-02d4-4851-a379-6c2242094384" path="/var/lib/kubelet/pods/f759b6a5-02d4-4851-a379-6c2242094384/volumes" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.692502 4689 scope.go:117] "RemoveContainer" containerID="e04808975d377142b9f4ef73cdc9e77ec75098a2c422e97872d1e19fcba0ca9e" Jan 23 12:17:09 crc kubenswrapper[4689]: I0123 12:17:09.692554 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6jv2w" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.064143 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-s2mgp"] Jan 23 12:17:10 crc kubenswrapper[4689]: E0123 12:17:10.065100 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f759b6a5-02d4-4851-a379-6c2242094384" containerName="container-00" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.065116 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="f759b6a5-02d4-4851-a379-6c2242094384" containerName="container-00" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.065453 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="f759b6a5-02d4-4851-a379-6c2242094384" containerName="container-00" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.066558 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.250579 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.250756 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxc2k\" (UniqueName: \"kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.353315 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.353386 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxc2k\" (UniqueName: \"kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.353982 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.376076 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxc2k\" (UniqueName: \"kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k\") pod \"crc-debug-s2mgp\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.403666 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.710796 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" event={"ID":"929b18ab-7a12-4a4e-938d-36cfbae00d17","Type":"ContainerStarted","Data":"f65782f460656ba5e9910609ac3f5a9c663b818b7833dba56d442cdfbf49d6eb"} Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.710863 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" event={"ID":"929b18ab-7a12-4a4e-938d-36cfbae00d17","Type":"ContainerStarted","Data":"6e7a44311e7237dabacedd66bb63b9f28d589fd72bbc73c7d98d161d0677da73"} Jan 23 12:17:10 crc kubenswrapper[4689]: I0123 12:17:10.730864 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" podStartSLOduration=0.730838553 podStartE2EDuration="730.838553ms" podCreationTimestamp="2026-01-23 12:17:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 12:17:10.72496507 +0000 UTC m=+5295.349644929" watchObservedRunningTime="2026-01-23 12:17:10.730838553 +0000 UTC m=+5295.355518442" Jan 23 12:17:11 crc kubenswrapper[4689]: I0123 12:17:11.730312 4689 generic.go:334] "Generic (PLEG): container finished" podID="929b18ab-7a12-4a4e-938d-36cfbae00d17" containerID="f65782f460656ba5e9910609ac3f5a9c663b818b7833dba56d442cdfbf49d6eb" exitCode=0 Jan 23 12:17:11 crc kubenswrapper[4689]: I0123 12:17:11.730391 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" event={"ID":"929b18ab-7a12-4a4e-938d-36cfbae00d17","Type":"ContainerDied","Data":"f65782f460656ba5e9910609ac3f5a9c663b818b7833dba56d442cdfbf49d6eb"} Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.883042 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.922440 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxc2k\" (UniqueName: \"kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k\") pod \"929b18ab-7a12-4a4e-938d-36cfbae00d17\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.922683 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host\") pod \"929b18ab-7a12-4a4e-938d-36cfbae00d17\" (UID: \"929b18ab-7a12-4a4e-938d-36cfbae00d17\") " Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.923221 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host" (OuterVolumeSpecName: "host") pod "929b18ab-7a12-4a4e-938d-36cfbae00d17" (UID: "929b18ab-7a12-4a4e-938d-36cfbae00d17"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.929722 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k" (OuterVolumeSpecName: "kube-api-access-dxc2k") pod "929b18ab-7a12-4a4e-938d-36cfbae00d17" (UID: "929b18ab-7a12-4a4e-938d-36cfbae00d17"). 
InnerVolumeSpecName "kube-api-access-dxc2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.930296 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-s2mgp"] Jan 23 12:17:12 crc kubenswrapper[4689]: I0123 12:17:12.939954 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-s2mgp"] Jan 23 12:17:13 crc kubenswrapper[4689]: I0123 12:17:13.024836 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxc2k\" (UniqueName: \"kubernetes.io/projected/929b18ab-7a12-4a4e-938d-36cfbae00d17-kube-api-access-dxc2k\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:13 crc kubenswrapper[4689]: I0123 12:17:13.024872 4689 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/929b18ab-7a12-4a4e-938d-36cfbae00d17-host\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:13 crc kubenswrapper[4689]: I0123 12:17:13.659796 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="929b18ab-7a12-4a4e-938d-36cfbae00d17" path="/var/lib/kubelet/pods/929b18ab-7a12-4a4e-938d-36cfbae00d17/volumes" Jan 23 12:17:13 crc kubenswrapper[4689]: I0123 12:17:13.758667 4689 scope.go:117] "RemoveContainer" containerID="f65782f460656ba5e9910609ac3f5a9c663b818b7833dba56d442cdfbf49d6eb" Jan 23 12:17:13 crc kubenswrapper[4689]: I0123 12:17:13.758735 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-s2mgp" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.153572 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6g27s"] Jan 23 12:17:14 crc kubenswrapper[4689]: E0123 12:17:14.154199 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929b18ab-7a12-4a4e-938d-36cfbae00d17" containerName="container-00" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.154224 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="929b18ab-7a12-4a4e-938d-36cfbae00d17" containerName="container-00" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.154557 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="929b18ab-7a12-4a4e-938d-36cfbae00d17" containerName="container-00" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.155535 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.253713 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl6vt\" (UniqueName: \"kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.253852 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.356384 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.356566 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.356631 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl6vt\" (UniqueName: \"kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.381847 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl6vt\" (UniqueName: \"kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt\") pod \"crc-debug-6g27s\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.477889 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:14 crc kubenswrapper[4689]: W0123 12:17:14.510827 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29b7dd4f_ba06_4727_8fd8_7bf4de191085.slice/crio-1bb78e92eea94acb8c8a226b11cc0b18bc59deecdfab8399c4db5a42fad295e7 WatchSource:0}: Error finding container 1bb78e92eea94acb8c8a226b11cc0b18bc59deecdfab8399c4db5a42fad295e7: Status 404 returned error can't find the container with id 1bb78e92eea94acb8c8a226b11cc0b18bc59deecdfab8399c4db5a42fad295e7 Jan 23 12:17:14 crc kubenswrapper[4689]: I0123 12:17:14.775472 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" event={"ID":"29b7dd4f-ba06-4727-8fd8-7bf4de191085","Type":"ContainerStarted","Data":"1bb78e92eea94acb8c8a226b11cc0b18bc59deecdfab8399c4db5a42fad295e7"} Jan 23 12:17:15 crc kubenswrapper[4689]: I0123 12:17:15.788558 4689 generic.go:334] "Generic (PLEG): container finished" podID="29b7dd4f-ba06-4727-8fd8-7bf4de191085" containerID="5c0969b8277782ab1bd96fc5905c0d524d2c772c849d21de8f23a8e3fa104ed2" exitCode=0 Jan 23 12:17:15 crc kubenswrapper[4689]: I0123 12:17:15.788963 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" event={"ID":"29b7dd4f-ba06-4727-8fd8-7bf4de191085","Type":"ContainerDied","Data":"5c0969b8277782ab1bd96fc5905c0d524d2c772c849d21de8f23a8e3fa104ed2"} Jan 23 12:17:15 crc kubenswrapper[4689]: I0123 12:17:15.839893 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6g27s"] Jan 23 12:17:15 crc kubenswrapper[4689]: I0123 12:17:15.851825 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rr6h7/crc-debug-6g27s"] Jan 23 12:17:16 crc kubenswrapper[4689]: I0123 12:17:16.928433 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.031591 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host\") pod \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.031911 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl6vt\" (UniqueName: \"kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt\") pod \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\" (UID: \"29b7dd4f-ba06-4727-8fd8-7bf4de191085\") " Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.032095 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host" (OuterVolumeSpecName: "host") pod "29b7dd4f-ba06-4727-8fd8-7bf4de191085" (UID: "29b7dd4f-ba06-4727-8fd8-7bf4de191085"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.032560 4689 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/29b7dd4f-ba06-4727-8fd8-7bf4de191085-host\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.038030 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt" (OuterVolumeSpecName: "kube-api-access-rl6vt") pod "29b7dd4f-ba06-4727-8fd8-7bf4de191085" (UID: "29b7dd4f-ba06-4727-8fd8-7bf4de191085"). InnerVolumeSpecName "kube-api-access-rl6vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.135383 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl6vt\" (UniqueName: \"kubernetes.io/projected/29b7dd4f-ba06-4727-8fd8-7bf4de191085-kube-api-access-rl6vt\") on node \"crc\" DevicePath \"\"" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.657948 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b7dd4f-ba06-4727-8fd8-7bf4de191085" path="/var/lib/kubelet/pods/29b7dd4f-ba06-4727-8fd8-7bf4de191085/volumes" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.814448 4689 scope.go:117] "RemoveContainer" containerID="5c0969b8277782ab1bd96fc5905c0d524d2c772c849d21de8f23a8e3fa104ed2" Jan 23 12:17:17 crc kubenswrapper[4689]: I0123 12:17:17.814613 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/crc-debug-6g27s" Jan 23 12:17:46 crc kubenswrapper[4689]: I0123 12:17:46.767841 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a74499e9-93c1-46dc-825a-3e9d7ec9adf3/aodh-api/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.013538 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a74499e9-93c1-46dc-825a-3e9d7ec9adf3/aodh-evaluator/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.018611 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a74499e9-93c1-46dc-825a-3e9d7ec9adf3/aodh-listener/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.047555 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a74499e9-93c1-46dc-825a-3e9d7ec9adf3/aodh-notifier/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.187505 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86d7448556-jq64j_e7fe0b60-2131-41ce-a23d-1ba4eb389afd/barbican-api/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.226580 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86d7448556-jq64j_e7fe0b60-2131-41ce-a23d-1ba4eb389afd/barbican-api-log/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.391995 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-8756468f4-gcf24_fbdf1ea2-a667-4b97-b775-c76f3bb7f235/barbican-keystone-listener/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.491683 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-8756468f4-gcf24_fbdf1ea2-a667-4b97-b775-c76f3bb7f235/barbican-keystone-listener-log/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.534192 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-worker-78cb64f85f-sf2tl_a7920087-af57-4092-8d74-0bcb75fc9e9d/barbican-worker/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.595094 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-78cb64f85f-sf2tl_a7920087-af57-4092-8d74-0bcb75fc9e9d/barbican-worker-log/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.731504 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-jk5cj_16110fd6-6d8f-4901-8f68-b155d2a27236/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.848877 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_155cdbe2-316b-4eeb-a1a5-3e59dddda4d1/ceilometer-central-agent/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.948109 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_155cdbe2-316b-4eeb-a1a5-3e59dddda4d1/ceilometer-notification-agent/0.log" Jan 23 12:17:47 crc kubenswrapper[4689]: I0123 12:17:47.983722 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_155cdbe2-316b-4eeb-a1a5-3e59dddda4d1/proxy-httpd/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.069737 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_155cdbe2-316b-4eeb-a1a5-3e59dddda4d1/sg-core/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.217340 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_21f5e336-9a50-43ce-8816-46552dcc4b43/cinder-api/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.247935 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_21f5e336-9a50-43ce-8816-46552dcc4b43/cinder-api-log/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.383501 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_543b7065-0dd2-411e-9854-8aaa3e11dd3e/cinder-scheduler/1.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.494193 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_543b7065-0dd2-411e-9854-8aaa3e11dd3e/cinder-scheduler/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.518393 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_543b7065-0dd2-411e-9854-8aaa3e11dd3e/probe/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.624705 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-xmwxp_d2250236-884b-4557-b447-5f6fe512fbdf/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.763073 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-z7kv4_05c5cfbc-c7bf-404d-84f3-d65bb01f34fd/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:48 crc kubenswrapper[4689]: I0123 12:17:48.868561 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5596c69fcc-zngbz_50ee26f5-4a1d-44e4-a32e-331b132626ff/init/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.062354 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-bdwqm_700a7d38-2c34-4c5e-a92e-62c448e4c6df/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.134805 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5596c69fcc-zngbz_50ee26f5-4a1d-44e4-a32e-331b132626ff/dnsmasq-dns/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.147358 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5596c69fcc-zngbz_50ee26f5-4a1d-44e4-a32e-331b132626ff/init/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.322142 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_2edb6824-46b3-40f9-8bef-8ec1a068ed8e/glance-httpd/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.341787 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_2edb6824-46b3-40f9-8bef-8ec1a068ed8e/glance-log/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.604182 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_58c805fc-4794-41c6-a425-9d9efbac01a3/glance-httpd/0.log" Jan 23 12:17:49 crc kubenswrapper[4689]: I0123 12:17:49.694798 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_58c805fc-4794-41c6-a425-9d9efbac01a3/glance-log/0.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.184088 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-7db46dddd6-txhvk_222ae852-00ad-449b-a92b-b0f52d2b856f/heat-engine/1.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.498603 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-7db46dddd6-txhvk_222ae852-00ad-449b-a92b-b0f52d2b856f/heat-engine/0.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.531710 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-79f99b898-dt7zp_04ebf5e0-3578-42f9-a3bc-c1c98b35a8f1/heat-cfnapi/0.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.553179 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-84b55874c-d6r8c_a256dd27-3435-4bcb-9ca0-46a0d472325b/heat-api/0.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.719201 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-tnxs4_7a8f484d-8ae2-4eb5-873c-8051270d53ea/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:50 crc kubenswrapper[4689]: I0123 12:17:50.725174 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-vx4zn_a81f5043-068a-4164-a466-f867c148c637/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.076025 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29486161-msgk7_e812dcb0-5d25-4173-8ae5-1c736542d1d3/keystone-cron/0.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.148577 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_db6cec49-030a-4282-a9a4-890f2783c0e5/kube-state-metrics/1.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.375090 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-z7fqz_e5b31e85-298a-4959-b5d0-87adb59850b6/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.408873 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_db6cec49-030a-4282-a9a4-890f2783c0e5/kube-state-metrics/0.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.601522 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-llm26_92f7b41c-45c5-4ad7-b7af-7459d727e982/logging-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:51 crc kubenswrapper[4689]: I0123 12:17:51.890518 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_451b4903-934f-44f5-8506-2fc093d6b310/mysqld-exporter/0.log" Jan 23 12:17:52 crc kubenswrapper[4689]: I0123 12:17:52.213033 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cf4c786cc-4bmzv_3b0b0aa7-a504-49b8-b6b4-5548b6ee7690/neutron-httpd/0.log" Jan 23 12:17:52 crc kubenswrapper[4689]: I0123 12:17:52.311133 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cf4c786cc-4bmzv_3b0b0aa7-a504-49b8-b6b4-5548b6ee7690/neutron-api/0.log" Jan 23 12:17:52 crc kubenswrapper[4689]: I0123 12:17:52.439135 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-xlql5_5fa207b4-af47-4f97-976c-8d6ac264443e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:53 crc kubenswrapper[4689]: I0123 12:17:53.021013 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_62f78bd0-c290-481f-9678-8acf00f77fe2/nova-api-log/0.log" Jan 23 12:17:53 crc kubenswrapper[4689]: I0123 12:17:53.233160 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_343d44be-4dcf-4047-beef-a1603131b74b/nova-cell0-conductor-conductor/0.log" Jan 23 12:17:53 crc kubenswrapper[4689]: I0123 12:17:53.448571 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_62f78bd0-c290-481f-9678-8acf00f77fe2/nova-api-api/0.log" Jan 23 12:17:53 crc kubenswrapper[4689]: I0123 12:17:53.758035 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_5f00d886-838d-4678-acd0-d917f134dd59/nova-cell1-conductor-conductor/0.log" Jan 23 12:17:53 crc kubenswrapper[4689]: I0123 12:17:53.803306 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_71303b7c-44e9-4746-85e3-21c519e54d54/nova-cell1-novncproxy-novncproxy/0.log" Jan 23 12:17:54 crc kubenswrapper[4689]: I0123 12:17:54.066517 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-fxrwh_31a5bdbc-3b38-46f3-8e74-bdc66342ec5e/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:54 crc kubenswrapper[4689]: I0123 12:17:54.149240 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_238926d6-2fb4-4759-9ef9-e93cca2c4bb0/nova-metadata-log/0.log" Jan 23 12:17:54 crc kubenswrapper[4689]: I0123 12:17:54.813436 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_6ba90fb7-d664-4eb6-90db-4ad3909ebfbf/nova-scheduler-scheduler/0.log" Jan 23 12:17:54 crc kubenswrapper[4689]: I0123 12:17:54.969104 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_cab355b0-25b6-4ad4-83ad-718ae756ae29/mysql-bootstrap/0.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.146859 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cab355b0-25b6-4ad4-83ad-718ae756ae29/mysql-bootstrap/0.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.226137 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cab355b0-25b6-4ad4-83ad-718ae756ae29/galera/1.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.418434 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cab355b0-25b6-4ad4-83ad-718ae756ae29/galera/0.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.666423 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_9926a3b2-8d65-4876-b56b-488948df1352/mysql-bootstrap/0.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.893592 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_9926a3b2-8d65-4876-b56b-488948df1352/mysql-bootstrap/0.log" Jan 23 12:17:55 crc kubenswrapper[4689]: I0123 12:17:55.969891 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_9926a3b2-8d65-4876-b56b-488948df1352/galera/1.log" Jan 23 12:17:56 crc kubenswrapper[4689]: I0123 12:17:56.156408 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_9926a3b2-8d65-4876-b56b-488948df1352/galera/0.log" Jan 23 12:17:56 crc kubenswrapper[4689]: I0123 12:17:56.319304 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_1a99db63-bd38-464f-b9f9-31bc662fb39d/openstackclient/0.log" Jan 23 12:17:56 crc kubenswrapper[4689]: I0123 12:17:56.577755 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-plmwq_833907d1-336e-417d-b362-bffe1f3521d3/openstack-network-exporter/0.log" Jan 23 12:17:56 crc kubenswrapper[4689]: I0123 12:17:56.796392 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kfl9p_f125458c-8822-4c87-a559-adf4f9387166/ovsdb-server-init/0.log" Jan 23 12:17:56 crc kubenswrapper[4689]: I0123 12:17:56.861497 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_238926d6-2fb4-4759-9ef9-e93cca2c4bb0/nova-metadata-metadata/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.005718 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kfl9p_f125458c-8822-4c87-a559-adf4f9387166/ovs-vswitchd/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.048186 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kfl9p_f125458c-8822-4c87-a559-adf4f9387166/ovsdb-server-init/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.133205 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kfl9p_f125458c-8822-4c87-a559-adf4f9387166/ovsdb-server/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.283430 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-s4nwf_6c69c7bf-0e75-4bed-a212-2b7746d5ef88/ovn-controller/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.564546 4689 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-pwj4k_cf6b169f-f9ae-4ce5-9a76-b98b00912ea0/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.658992 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_43e3f941-da86-4f2a-80ea-24d29e55acb3/openstack-network-exporter/0.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.800884 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_43e3f941-da86-4f2a-80ea-24d29e55acb3/ovn-northd/1.log" Jan 23 12:17:57 crc kubenswrapper[4689]: I0123 12:17:57.843088 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_43e3f941-da86-4f2a-80ea-24d29e55acb3/ovn-northd/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.053780 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8245587e-b385-48bf-a684-2c72fedfb5d6/openstack-network-exporter/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.154526 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8245587e-b385-48bf-a684-2c72fedfb5d6/ovsdbserver-nb/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.230238 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-777b6f5fc9-72drb_036797f0-f940-4ef8-9b43-cc12843d2338/keystone-api/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.292664 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_5c276d1f-838f-4113-b343-18c150dfa59b/openstack-network-exporter/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.410558 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_5c276d1f-838f-4113-b343-18c150dfa59b/ovsdbserver-sb/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.647622 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-68cfbc8d8-tc5p4_5b421cc4-175a-484f-a454-3c38db90b6c5/placement-api/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.735461 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-68cfbc8d8-tc5p4_5b421cc4-175a-484f-a454-3c38db90b6c5/placement-log/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.747509 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/init-config-reloader/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.953008 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/config-reloader/0.log" Jan 23 12:17:58 crc kubenswrapper[4689]: I0123 12:17:58.963535 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/init-config-reloader/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.022387 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/prometheus/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.051034 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/prometheus/1.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.234631 4689 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5f60dabd-d5a7-417e-a9f4-2a9f06e4778d/setup-container/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.270814 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8cc0bc0c-47d7-48d8-bfba-a9694ab485a0/thanos-sidecar/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.588203 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5f60dabd-d5a7-417e-a9f4-2a9f06e4778d/rabbitmq/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.601431 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5f60dabd-d5a7-417e-a9f4-2a9f06e4778d/setup-container/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.616072 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_94aeeba2-67ab-417d-8a04-a8f22353294f/setup-container/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.831004 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_06df4649-6b5c-4a19-be98-7603002120de/setup-container/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.845214 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_94aeeba2-67ab-417d-8a04-a8f22353294f/setup-container/0.log" Jan 23 12:17:59 crc kubenswrapper[4689]: I0123 12:17:59.871665 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_94aeeba2-67ab-417d-8a04-a8f22353294f/rabbitmq/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.164587 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_06df4649-6b5c-4a19-be98-7603002120de/setup-container/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.210495 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_06df4649-6b5c-4a19-be98-7603002120de/rabbitmq/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.220775 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_907261fc-5550-4dd8-b645-0341b4bdd4de/setup-container/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.448524 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_907261fc-5550-4dd8-b645-0341b4bdd4de/setup-container/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.551729 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-fslfb_98b02990-c192-41b3-88a5-556560831704/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.599363 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_907261fc-5550-4dd8-b645-0341b4bdd4de/rabbitmq/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.781697 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-xspnm_449fef30-676e-47a0-b1ea-8e5922146176/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:00 crc kubenswrapper[4689]: I0123 12:18:00.915939 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-cd4zh_5e40e6bf-2a52-4686-a459-50df12dfb406/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:01 crc 
kubenswrapper[4689]: I0123 12:18:01.044295 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-lwxl8_4ea2f795-bc17-496a-af2a-934d59f8aa81/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:01 crc kubenswrapper[4689]: I0123 12:18:01.338702 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-8fp9h_162b7807-b14d-451f-8f58-e5bab6b5382c/ssh-known-hosts-edpm-deployment/0.log" Jan 23 12:18:01 crc kubenswrapper[4689]: I0123 12:18:01.502798 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b54798db9-jfwb5_ab0d641a-3762-404a-baff-e2026b4a3896/proxy-server/0.log" Jan 23 12:18:01 crc kubenswrapper[4689]: I0123 12:18:01.537980 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b54798db9-jfwb5_ab0d641a-3762-404a-baff-e2026b4a3896/proxy-httpd/0.log" Jan 23 12:18:01 crc kubenswrapper[4689]: I0123 12:18:01.713128 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-sb9zz_3a33baed-7a5b-44f9-b344-114919fa316b/swift-ring-rebalance/0.log" Jan 23 12:18:01 crc kubenswrapper[4689]: I0123 12:18:01.850498 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/account-auditor/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.039956 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/account-reaper/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.047633 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/account-replicator/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.134645 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/account-server/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.143299 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/container-auditor/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.386271 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/container-replicator/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.584579 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/object-auditor/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.586478 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/container-updater/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.668848 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/container-server/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.926838 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/object-replicator/0.log" Jan 23 12:18:02 crc kubenswrapper[4689]: I0123 12:18:02.960851 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/object-expirer/0.log" Jan 23 12:18:02 crc 
kubenswrapper[4689]: I0123 12:18:02.970974 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/object-updater/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.006219 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/object-server/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.171309 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/rsync/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.209398 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_502e87fb-9e46-41c3-929e-c007018641db/swift-recon-cron/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.428695 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ltdtz_725556d9-a125-4022-b69d-2611524af283/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.579939 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-dp24b_76d15661-afac-4fd0-8c0c-bb3ab33b2c29/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:03 crc kubenswrapper[4689]: I0123 12:18:03.835949 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5c12c927-06bf-44b8-bf6f-724ecb1e431a/test-operator-logs-container/0.log" Jan 23 12:18:04 crc kubenswrapper[4689]: I0123 12:18:04.064098 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-gtqdj_175a3ee2-d571-4f2f-8688-536d09975ffd/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 23 12:18:04 crc kubenswrapper[4689]: I0123 12:18:04.465415 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_8c315092-e9fe-409e-81ca-39ac98b9fea6/tempest-tests-tempest-tests-runner/0.log" Jan 23 12:18:09 crc kubenswrapper[4689]: I0123 12:18:09.204377 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wh677" podUID="3d3561eb-7369-4466-b9ee-037e02b2c219" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 12:18:10 crc kubenswrapper[4689]: I0123 12:18:10.463349 4689 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pr8zt container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:18:10 crc kubenswrapper[4689]: I0123 12:18:10.463725 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pr8zt" podUID="5a9ca72a-63ae-4664-ab99-ec38d2e2d3a3" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:18:10 crc kubenswrapper[4689]: I0123 12:18:10.525131 4689 patch_prober.go:28] interesting 
pod/logging-loki-gateway-74447864d7-6nhsx container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:18:10 crc kubenswrapper[4689]: I0123 12:18:10.525187 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.57:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 12:18:11 crc kubenswrapper[4689]: I0123 12:18:11.038525 4689 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 12:18:11 crc kubenswrapper[4689]: I0123 12:18:11.039195 4689 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="a86f4ae1-8a8c-4178-a905-c03ad33a2eca" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.80:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 12:18:11 crc kubenswrapper[4689]: I0123 12:18:11.090176 4689 patch_prober.go:28] interesting pod/logging-loki-gateway-74447864d7-6nhsx container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.57:8081/live\": EOF" start-of-body= Jan 23 12:18:11 crc kubenswrapper[4689]: I0123 12:18:11.090234 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-74447864d7-6nhsx" podUID="fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.57:8081/live\": EOF" Jan 23 12:18:24 crc kubenswrapper[4689]: I0123 12:18:24.726926 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_3b85b39c-5625-416e-9ddd-55845b645716/memcached/0.log" Jan 23 12:18:33 crc kubenswrapper[4689]: I0123 12:18:33.315549 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:18:33 crc kubenswrapper[4689]: I0123 12:18:33.316123 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.024641 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/util/0.log" Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.238057 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/pull/0.log" 
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.261101 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/util/0.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.304945 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/pull/0.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.492642 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/pull/0.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.505191 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/util/0.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.516531 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2ce15d9fe36760a0c3d3cf10b55c534753fbb8268a6b75e57efe11168enw86x_2496f05e-2cab-45ec-8c73-1820e5c268f6/extract/0.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.739162 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-86c6v_c9dc7063-1b29-40e1-b451-e9dc882e7476/manager/1.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.808726 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-q2clt_d587cb55-dfd2-42e6-bb32-3a4202dd05c5/manager/1.log"
Jan 23 12:18:42 crc kubenswrapper[4689]: I0123 12:18:42.812640 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-86c6v_c9dc7063-1b29-40e1-b451-e9dc882e7476/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.067687 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-q2clt_d587cb55-dfd2-42e6-bb32-3a4202dd05c5/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.072360 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-22rdn_54403d19-67da-4783-8b45-b7070bc15424/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.116681 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-22rdn_54403d19-67da-4783-8b45-b7070bc15424/manager/1.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.391195 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-dl6g9_39db2be1-cb37-4ca9-af8a-5ce0f2d1db16/manager/1.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.444437 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-dl6g9_39db2be1-cb37-4ca9-af8a-5ce0f2d1db16/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.568597 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-s69pd_1f6a7f15-609b-414e-8119-366afe98811f/manager/1.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.648746 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-s69pd_1f6a7f15-609b-414e-8119-366afe98811f/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.698752 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-cgxb7_b5e62e31-60a7-4964-b3e7-611e7a8bfa81/manager/1.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.803716 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-cgxb7_b5e62e31-60a7-4964-b3e7-611e7a8bfa81/manager/0.log"
Jan 23 12:18:43 crc kubenswrapper[4689]: I0123 12:18:43.981615 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-l5n7d_8359ad74-2a40-4f5f-afe6-880a3f0a990e/manager/1.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.159289 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-wh677_3d3561eb-7369-4466-b9ee-037e02b2c219/manager/1.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.239606 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-wh677_3d3561eb-7369-4466-b9ee-037e02b2c219/manager/0.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.274648 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-l5n7d_8359ad74-2a40-4f5f-afe6-880a3f0a990e/manager/0.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.437740 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-b8s9h_d55b5d87-6f4b-4eb7-bfc7-025b936cebb9/manager/1.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.518755 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-b8s9h_d55b5d87-6f4b-4eb7-bfc7-025b936cebb9/manager/0.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.605540 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-szbq7_28a286e0-4072-40b0-aa95-4a12299f5a72/manager/1.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.692460 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-szbq7_28a286e0-4072-40b0-aa95-4a12299f5a72/manager/0.log"
Jan 23 12:18:44 crc kubenswrapper[4689]: I0123 12:18:44.796673 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-m8m6r_5f4d15d8-f941-4082-ab51-3ecda5527f9b/manager/1.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.111905 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-m8m6r_5f4d15d8-f941-4082-ab51-3ecda5527f9b/manager/0.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.236580 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-rt7xn_a9f05c03-72c2-4906-b327-df50d5922d28/manager/1.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.316226 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-rt7xn_a9f05c03-72c2-4906-b327-df50d5922d28/manager/0.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.445226 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-2lwwn_2bd7a193-5394-452e-9315-0332e4a4e667/manager/1.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.580690 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-2lwwn_2bd7a193-5394-452e-9315-0332e4a4e667/manager/0.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.584048 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-m2sb7_167d35d1-8eb3-492e-beb3-4325d183c7b9/manager/1.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.698021 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-m2sb7_167d35d1-8eb3-492e-beb3-4325d183c7b9/manager/0.log"
Jan 23 12:18:45 crc kubenswrapper[4689]: I0123 12:18:45.812427 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b8545zczg_99f43d3e-dce3-4f53-90a5-76793663baaf/manager/1.log"
Jan 23 12:18:46 crc kubenswrapper[4689]: I0123 12:18:46.561110 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b8545zczg_99f43d3e-dce3-4f53-90a5-76793663baaf/manager/0.log"
Jan 23 12:18:46 crc kubenswrapper[4689]: I0123 12:18:46.566994 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7dc68b46f7-8szn8_53597531-35c9-4478-95cc-690c554f04d0/operator/1.log"
Jan 23 12:18:46 crc kubenswrapper[4689]: I0123 12:18:46.858608 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c8c5b48f6-4bw4b_4922b965-fa40-47b5-b388-e63767b62a97/manager/1.log"
Jan 23 12:18:46 crc kubenswrapper[4689]: I0123 12:18:46.880360 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7dc68b46f7-8szn8_53597531-35c9-4478-95cc-690c554f04d0/operator/0.log"
Jan 23 12:18:46 crc kubenswrapper[4689]: I0123 12:18:46.890656 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-d9bfx_d92e2c5f-df9d-44e5-839c-806799a650a4/registry-server/1.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.075170 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-5rkch_d36ac685-507d-4cfa-b6fe-7f595536c32f/manager/1.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.097778 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-d9bfx_d92e2c5f-df9d-44e5-839c-806799a650a4/registry-server/0.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.262775 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-5rkch_d36ac685-507d-4cfa-b6fe-7f595536c32f/manager/0.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.409260 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-kklnd_3369528a-f39f-4e47-92e9-abbca4395b98/manager/1.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.468701 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-kklnd_3369528a-f39f-4e47-92e9-abbca4395b98/manager/0.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.698668 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-b2s75_b1a77706-f3e5-48b9-95b8-5f13daa0d29f/operator/1.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.934309 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-b2s75_b1a77706-f3e5-48b9-95b8-5f13daa0d29f/operator/0.log"
Jan 23 12:18:47 crc kubenswrapper[4689]: I0123 12:18:47.985128 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-t97lk_72fb2e87-da8d-4db1-b255-d38d7c15b5cd/manager/1.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.007651 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c8c5b48f6-4bw4b_4922b965-fa40-47b5-b388-e63767b62a97/manager/0.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.045237 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-t97lk_72fb2e87-da8d-4db1-b255-d38d7c15b5cd/manager/0.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.160473 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-94c58dc69-75pd4_6e48e594-66b4-4d88-823f-2ed90fa79d66/manager/1.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.300479 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-rszc5_7ad0b754-e721-4b19-b0b6-a7e1200a48d4/manager/1.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.388565 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-rszc5_7ad0b754-e721-4b19-b0b6-a7e1200a48d4/manager/0.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.540926 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-j79ts_f451d39d-2f3f-4c53-b5a2-d8e7f74247f9/manager/1.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.543993 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-j79ts_f451d39d-2f3f-4c53-b5a2-d8e7f74247f9/manager/0.log"
Jan 23 12:18:48 crc kubenswrapper[4689]: I0123 12:18:48.578894 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-94c58dc69-75pd4_6e48e594-66b4-4d88-823f-2ed90fa79d66/manager/0.log"
Jan 23 12:19:03 crc kubenswrapper[4689]: I0123 12:19:03.311118 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 12:19:03 crc kubenswrapper[4689]: I0123 12:19:03.311824 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 12:19:10 crc kubenswrapper[4689]: I0123 12:19:10.637870 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-5zz8l_61756198-4db5-4ee2-b629-c92d64b9bf73/control-plane-machine-set-operator/0.log"
Jan 23 12:19:11 crc kubenswrapper[4689]: I0123 12:19:11.153972 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-f2gqj_7f1b033c-cebb-40ef-a05c-798f5455e05f/kube-rbac-proxy/0.log"
Jan 23 12:19:11 crc kubenswrapper[4689]: I0123 12:19:11.186420 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-f2gqj_7f1b033c-cebb-40ef-a05c-798f5455e05f/machine-api-operator/0.log"
Jan 23 12:19:25 crc kubenswrapper[4689]: I0123 12:19:25.439776 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-ksh87_13e7690f-c133-46d4-8a38-f4238d3cf4cc/cert-manager-controller/0.log"
Jan 23 12:19:25 crc kubenswrapper[4689]: I0123 12:19:25.608321 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-cfnhv_6395a95b-6677-4801-9026-34268f7bdf2a/cert-manager-cainjector/0.log"
Jan 23 12:19:25 crc kubenswrapper[4689]: I0123 12:19:25.724918 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-pw8pk_7496161e-1841-4a0e-ac40-e157bbfd9520/cert-manager-webhook/0.log"
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.310731 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.311296 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.311347 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf"
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.312729 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.313043 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480" gracePeriod=600
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.534867 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480" exitCode=0
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.535027 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480"}
Jan 23 12:19:33 crc kubenswrapper[4689]: I0123 12:19:33.535114 4689 scope.go:117] "RemoveContainer" containerID="3afec6c5a485eb3311a048e74e2af7b7d588a36807b10e7337f71cd0aae9dd51"
Jan 23 12:19:34 crc kubenswrapper[4689]: I0123 12:19:34.550748 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0"}
Jan 23 12:19:40 crc kubenswrapper[4689]: I0123 12:19:40.596378 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-7hw66_f2a5d877-b98d-41b6-8686-f55b2ef8b34b/nmstate-console-plugin/0.log"
Jan 23 12:19:40 crc kubenswrapper[4689]: I0123 12:19:40.799429 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-z5sn9_90c7af03-d2b6-45ef-b228-d5621bf1f671/nmstate-handler/0.log"
Jan 23 12:19:40 crc kubenswrapper[4689]: I0123 12:19:40.858364 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7xhq4_cdc8775f-209a-4342-83fb-78612d37b22b/kube-rbac-proxy/0.log"
Jan 23 12:19:40 crc kubenswrapper[4689]: I0123 12:19:40.987906 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-7xhq4_cdc8775f-209a-4342-83fb-78612d37b22b/nmstate-metrics/0.log"
Jan 23 12:19:41 crc kubenswrapper[4689]: I0123 12:19:41.113858 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-84f2p_5d52f3fd-b543-410b-acc6-348a9c684ee2/nmstate-operator/0.log"
Jan 23 12:19:41 crc kubenswrapper[4689]: I0123 12:19:41.236388 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-2r5kt_83cfd8ec-2928-4cd8-a14c-330cce17bfd5/nmstate-webhook/0.log"
Jan 23 12:19:56 crc kubenswrapper[4689]: I0123 12:19:56.158134 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/kube-rbac-proxy/0.log"
Jan 23 12:19:56 crc kubenswrapper[4689]: I0123 12:19:56.191935 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/manager/1.log"
Jan 23 12:19:56 crc kubenswrapper[4689]: I0123 12:19:56.370949 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/manager/0.log"
Jan 23 12:20:09 crc kubenswrapper[4689]: I0123 12:20:09.773705 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-5spsb_55938c76-e594-4530-b7ee-0e7f3089063e/prometheus-operator/0.log"
Jan 23 12:20:09 crc kubenswrapper[4689]: I0123 12:20:09.934443 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_2805243a-4a45-4dc0-b5a2-91c2163e11b4/prometheus-operator-admission-webhook/0.log"
Jan 23 12:20:09 crc kubenswrapper[4689]: I0123 12:20:09.973506 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_cf8299a2-da8d-489e-bc54-1212dd8d3099/prometheus-operator-admission-webhook/0.log"
Jan 23 12:20:10 crc kubenswrapper[4689]: I0123 12:20:10.144047 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-95bv6_45cadce8-d2da-450b-9b37-c2a6b2a1c595/operator/1.log"
Jan 23 12:20:10 crc kubenswrapper[4689]: I0123 12:20:10.174993 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-95bv6_45cadce8-d2da-450b-9b37-c2a6b2a1c595/operator/0.log"
Jan 23 12:20:10 crc kubenswrapper[4689]: I0123 12:20:10.307033 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-fn992_0c864284-1e65-4712-a75f-bad9506f55d6/observability-ui-dashboards/0.log"
Jan 23 12:20:10 crc kubenswrapper[4689]: I0123 12:20:10.448682 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-7qpjs_a13e2123-3780-4c13-b8a4-760d31e5636e/perses-operator/0.log"
Jan 23 12:20:25 crc kubenswrapper[4689]: I0123 12:20:25.873072 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-79cf69ddc8-n77sv_4d4b9938-cbd8-4628-bd9e-c402b2cb3828/cluster-logging-operator/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.071871 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-t2cnk_388e3d73-e449-4fdc-9ba2-47b55a360c92/collector/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.078769 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_79a8bb59-41ce-4777-90af-ded6dfe2e080/loki-compactor/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.294650 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-5f678c8dd6-vllhz_72abaa76-42ea-4987-8f23-f4aba4f669e2/loki-distributor/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.336669 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-74447864d7-6nhsx_fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2/gateway/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.392433 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-74447864d7-6nhsx_fc08c5e9-b208-45b6-bd9e-7cc821d7bbe2/opa/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.526344 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-74447864d7-m77fj_ee6fa0a6-5ac3-4202-9280-8babe4cb29a0/gateway/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.546684 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-74447864d7-m77fj_ee6fa0a6-5ac3-4202-9280-8babe4cb29a0/opa/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.760469 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_cb56bf0f-badb-490a-be0a-2ef41c9a2459/loki-ingester/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.769238 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_a86f4ae1-8a8c-4178-a905-c03ad33a2eca/loki-index-gateway/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.913593 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-76788598db-rgsmc_8297556c-bbae-4eb0-b3da-b09a005c90f6/loki-querier/0.log"
Jan 23 12:20:26 crc kubenswrapper[4689]: I0123 12:20:26.973483 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-69d9546745-6sm7h_faf0752a-d119-41d3-913f-6377a601e8ca/loki-query-frontend/0.log"
Jan 23 12:20:32 crc kubenswrapper[4689]: I0123 12:20:32.227615 4689 scope.go:117] "RemoveContainer" containerID="03cdc20dd0cd4f0af84bd508d2dbb570330c84b4eea10d3b97aa4e1f7a4cf3fa"
Jan 23 12:20:32 crc kubenswrapper[4689]: I0123 12:20:32.275463 4689 scope.go:117] "RemoveContainer" containerID="83625ce84992466d2cabfcd3a004bfec9f88ce00920fa69bdc1ba1134b2a6f59"
Jan 23 12:20:41 crc kubenswrapper[4689]: I0123 12:20:41.846603 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mtbb4_4ab08845-476b-4601-9385-bbec37b18e35/controller/1.log"
Jan 23 12:20:41 crc kubenswrapper[4689]: I0123 12:20:41.977504 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mtbb4_4ab08845-476b-4601-9385-bbec37b18e35/controller/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.031573 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mtbb4_4ab08845-476b-4601-9385-bbec37b18e35/kube-rbac-proxy/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.300056 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-frr-files/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.511049 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-reloader/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.515202 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-frr-files/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.527301 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-metrics/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.597058 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-reloader/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.767175 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-frr-files/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.819540 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-reloader/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.837048 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-metrics/0.log"
Jan 23 12:20:42 crc kubenswrapper[4689]: I0123 12:20:42.864760 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-metrics/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.041904 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-frr-files/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.044581 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-reloader/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.050588 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/controller/1.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.053701 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/cp-metrics/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.234747 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/controller/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.258533 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/frr-metrics/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.390174 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/frr/1.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.479549 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/kube-rbac-proxy/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.536539 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/kube-rbac-proxy-frr/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.660025 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/reloader/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.770920 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-vcs9z_3e9ac503-1ccb-4008-866b-0e6e5a11227d/frr-k8s-webhook-server/1.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.892451 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-vcs9z_3e9ac503-1ccb-4008-866b-0e6e5a11227d/frr-k8s-webhook-server/0.log"
Jan 23 12:20:43 crc kubenswrapper[4689]: I0123 12:20:43.996775 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6b5bd865cb-ppjnx_af5f2d1f-74a0-4ac2-9e78-c81c3815f722/manager/1.log"
Jan 23 12:20:44 crc kubenswrapper[4689]: I0123 12:20:44.152201 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6b5bd865cb-ppjnx_af5f2d1f-74a0-4ac2-9e78-c81c3815f722/manager/0.log"
Jan 23 12:20:44 crc kubenswrapper[4689]: I0123 12:20:44.287295 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-64d6f55f49-snsq8_6087eb3b-66c0-4d14-a5de-008f086a59ee/webhook-server/1.log"
Jan 23 12:20:44 crc kubenswrapper[4689]: I0123 12:20:44.395903 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-64d6f55f49-snsq8_6087eb3b-66c0-4d14-a5de-008f086a59ee/webhook-server/0.log"
Jan 23 12:20:44 crc kubenswrapper[4689]: I0123 12:20:44.535616 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tqgjs_00b72a13-b4c0-43b7-97b9-1e9a1ec55edf/kube-rbac-proxy/0.log"
Jan 23 12:20:44 crc kubenswrapper[4689]: I0123 12:20:44.848633 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tqgjs_00b72a13-b4c0-43b7-97b9-1e9a1ec55edf/speaker/1.log"
Jan 23 12:20:45 crc kubenswrapper[4689]: I0123 12:20:45.024552 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pkdqh_2cd07ec1-86a5-45f4-b5a6-edaa4f185c17/frr/0.log"
Jan 23 12:20:45 crc kubenswrapper[4689]: I0123 12:20:45.345957 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tqgjs_00b72a13-b4c0-43b7-97b9-1e9a1ec55edf/speaker/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.014498 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/util/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.223402 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/pull/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.229714 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/util/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.258806 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/pull/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.393622 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/util/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.433346 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/extract/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.472713 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a22p4vp_a6b7cf7d-3b83-4abb-8ca1-9b623c85917c/pull/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.625595 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/util/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.800675 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/pull/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.825826 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/util/0.log"
Jan 23 12:20:59 crc kubenswrapper[4689]: I0123 12:20:59.844288 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/pull/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.009455 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/pull/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.013214 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/util/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.016012 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg6zqb_c41acced-4000-4e9d-ade1-8fbc9f93e303/extract/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.233842 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/util/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.416487 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/pull/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.435957 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/util/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.453265 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/pull/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.608273 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/util/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.628648 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/pull/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.691278 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360b9hzfd_007f3542-55a2-495b-a618-6933e425c7c3/extract/0.log"
Jan 23 12:21:00 crc kubenswrapper[4689]: I0123 12:21:00.812113 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/util/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.012968 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/util/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.029853 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/pull/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.060932 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/pull/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.169645 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/pull/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.217607 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/util/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.239024 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7134gxhd_755f37c8-04c1-462c-bcde-fec84986a51a/extract/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.377942 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/util/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.565598 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/pull/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.602124 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/util/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.612031 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/pull/0.log"
Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.762970 4689 log.go:25] "Finished parsing log file"
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/util/0.log" Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.792330 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/extract/0.log" Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.795922 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08bkwf7_b708d6b3-99ec-4a6e-8942-86cf6bf46362/pull/0.log" Jan 23 12:21:01 crc kubenswrapper[4689]: I0123 12:21:01.985420 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-utilities/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.156743 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-utilities/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.165446 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-content/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.189280 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-content/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.387293 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-content/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.410430 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/extract-utilities/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.717766 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-utilities/0.log" Jan 23 12:21:02 crc kubenswrapper[4689]: I0123 12:21:02.918932 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-utilities/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: I0123 12:21:02.949970 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-content/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: I0123 12:21:03.047914 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-content/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: I0123 12:21:03.210591 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-utilities/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: I0123 12:21:03.234168 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/extract-content/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: 
I0123 12:21:03.854838 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-s7k65_386d7669-fab2-42b9-ac43-767d9ae837b8/marketplace-operator/0.log" Jan 23 12:21:03 crc kubenswrapper[4689]: I0123 12:21:03.881962 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/registry-server/1.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.139455 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-utilities/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.330959 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/registry-server/1.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.525700 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-utilities/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.548738 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-content/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.604034 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-content/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.804261 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-utilities/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.901369 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/registry-server/1.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.904376 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-x87gr_5fbbf7f9-c268-4a7b-a278-4f72a9099acf/registry-server/0.log" Jan 23 12:21:04 crc kubenswrapper[4689]: I0123 12:21:04.914665 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/extract-content/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.143613 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-utilities/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.165773 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4hdbj_b7c32de2-03fb-4b12-8fdf-69161c24eed2/registry-server/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.232880 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-h6w2m_5f0a7d6b-1743-49ab-9f0b-2742ce992ecf/registry-server/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.395468 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-utilities/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 
12:21:05.404812 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-content/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.410428 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-content/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.606838 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-utilities/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.666860 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/extract-content/0.log" Jan 23 12:21:05 crc kubenswrapper[4689]: I0123 12:21:05.736224 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/registry-server/1.log" Jan 23 12:21:06 crc kubenswrapper[4689]: I0123 12:21:06.402471 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-4zskl_0cb158e3-50d7-4750-8f95-c22d0a94a70f/registry-server/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.600357 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6cc975f456-bm8sk_2805243a-4a45-4dc0-b5a2-91c2163e11b4/prometheus-operator-admission-webhook/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.653973 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6cc975f456-qndbv_cf8299a2-da8d-489e-bc54-1212dd8d3099/prometheus-operator-admission-webhook/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.699861 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-5spsb_55938c76-e594-4530-b7ee-0e7f3089063e/prometheus-operator/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.794366 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-95bv6_45cadce8-d2da-450b-9b37-c2a6b2a1c595/operator/1.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.943579 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-95bv6_45cadce8-d2da-450b-9b37-c2a6b2a1c595/operator/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.950821 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-7qpjs_a13e2123-3780-4c13-b8a4-760d31e5636e/perses-operator/0.log" Jan 23 12:21:18 crc kubenswrapper[4689]: I0123 12:21:18.965791 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-fn992_0c864284-1e65-4712-a75f-bad9506f55d6/observability-ui-dashboards/0.log" Jan 23 12:21:32 crc kubenswrapper[4689]: I0123 12:21:32.376592 4689 scope.go:117] "RemoveContainer" containerID="5c3f4e709eda03d440452933b9b47f31c55ecf66a62a05e81dd0c5a80f67a080" Jan 23 12:21:32 crc kubenswrapper[4689]: I0123 12:21:32.403487 4689 scope.go:117] "RemoveContainer" containerID="94a962a1dbc6a2d7d5689506f873aca79bcf8ff7cf2891d52ce900d8e84b4a50" Jan 23 12:21:32 crc 
kubenswrapper[4689]: I0123 12:21:32.430559 4689 scope.go:117] "RemoveContainer" containerID="feca70f03cf25b83fb1ba69fbdee96df486f4ea1c819d0c483a67b0e93831640" Jan 23 12:21:32 crc kubenswrapper[4689]: I0123 12:21:32.456600 4689 scope.go:117] "RemoveContainer" containerID="c8c941db98c408fe90bb837ad6ce281735172be70bbb26b5af94eb6cd9c72443" Jan 23 12:21:32 crc kubenswrapper[4689]: I0123 12:21:32.491121 4689 scope.go:117] "RemoveContainer" containerID="d519166f00bcfc9c745f45cb04b29c770ba622da1eb7e5aa29c6c52c93f2aa47" Jan 23 12:21:33 crc kubenswrapper[4689]: I0123 12:21:33.311236 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:21:33 crc kubenswrapper[4689]: I0123 12:21:33.311638 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:21:33 crc kubenswrapper[4689]: I0123 12:21:33.546931 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/manager/0.log" Jan 23 12:21:33 crc kubenswrapper[4689]: I0123 12:21:33.585342 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/manager/1.log" Jan 23 12:21:33 crc kubenswrapper[4689]: I0123 12:21:33.592445 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-775d8c8b9c-rkqj2_6cbb7c9e-32cf-4368-8983-96d4006dcd58/kube-rbac-proxy/0.log" Jan 23 12:22:03 crc kubenswrapper[4689]: I0123 12:22:03.312736 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:22:03 crc kubenswrapper[4689]: I0123 12:22:03.313375 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.310631 4689 patch_prober.go:28] interesting pod/machine-config-daemon-sp7sf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.311092 4689 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 12:22:33 crc 
kubenswrapper[4689]: I0123 12:22:33.311133 4689 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.311942 4689 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0"} pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.311987 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerName="machine-config-daemon" containerID="cri-o://170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" gracePeriod=600 Jan 23 12:22:33 crc kubenswrapper[4689]: E0123 12:22:33.545217 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.841063 4689 generic.go:334] "Generic (PLEG): container finished" podID="3d8de6cc-a03d-468b-bfe9-fbf544087653" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" exitCode=0 Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.841130 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerDied","Data":"170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0"} Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.841239 4689 scope.go:117] "RemoveContainer" containerID="aa4f5b129cfa4ef2d2db5743f3b751306632ef017ae5776c4040e3f29f5f9480" Jan 23 12:22:33 crc kubenswrapper[4689]: I0123 12:22:33.842451 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:22:33 crc kubenswrapper[4689]: E0123 12:22:33.843121 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:22:44 crc kubenswrapper[4689]: I0123 12:22:44.640666 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:22:44 crc kubenswrapper[4689]: E0123 12:22:44.641452 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.020313 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:22:58 crc kubenswrapper[4689]: E0123 12:22:58.022192 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b7dd4f-ba06-4727-8fd8-7bf4de191085" containerName="container-00" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.022213 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b7dd4f-ba06-4727-8fd8-7bf4de191085" containerName="container-00" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.022778 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b7dd4f-ba06-4727-8fd8-7bf4de191085" containerName="container-00" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.027253 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.040581 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.108228 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.108496 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl9ck\" (UniqueName: \"kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.109024 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.211473 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl9ck\" (UniqueName: \"kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.211619 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.211695 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities\") pod 
\"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.214279 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.214346 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.237076 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl9ck\" (UniqueName: \"kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck\") pod \"certified-operators-drdfl\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:58 crc kubenswrapper[4689]: I0123 12:22:58.353207 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:22:59 crc kubenswrapper[4689]: I0123 12:22:59.024403 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:22:59 crc kubenswrapper[4689]: I0123 12:22:59.215647 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerStarted","Data":"03ba82daa79e9b1b79690a1bf283aff6a063508adfb0e8bc23e813d348a5224a"} Jan 23 12:22:59 crc kubenswrapper[4689]: I0123 12:22:59.641820 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:22:59 crc kubenswrapper[4689]: E0123 12:22:59.644576 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:23:00 crc kubenswrapper[4689]: I0123 12:23:00.227393 4689 generic.go:334] "Generic (PLEG): container finished" podID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerID="afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca" exitCode=0 Jan 23 12:23:00 crc kubenswrapper[4689]: I0123 12:23:00.227437 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerDied","Data":"afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca"} Jan 23 12:23:00 crc kubenswrapper[4689]: I0123 12:23:00.230352 4689 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 12:23:02 crc kubenswrapper[4689]: I0123 12:23:02.258092 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerStarted","Data":"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee"} Jan 23 12:23:03 crc kubenswrapper[4689]: I0123 12:23:03.272629 4689 generic.go:334] "Generic (PLEG): container finished" podID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerID="689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee" exitCode=0 Jan 23 12:23:03 crc kubenswrapper[4689]: I0123 12:23:03.272708 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerDied","Data":"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee"} Jan 23 12:23:04 crc kubenswrapper[4689]: I0123 12:23:04.294449 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerStarted","Data":"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f"} Jan 23 12:23:08 crc kubenswrapper[4689]: I0123 12:23:08.362429 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:08 crc kubenswrapper[4689]: I0123 12:23:08.362932 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:08 crc kubenswrapper[4689]: I0123 12:23:08.424393 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:08 crc kubenswrapper[4689]: I0123 12:23:08.477921 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-drdfl" podStartSLOduration=7.986943621 podStartE2EDuration="11.477876143s" podCreationTimestamp="2026-01-23 12:22:57 +0000 UTC" firstStartedPulling="2026-01-23 12:23:00.229206153 +0000 UTC m=+5644.853886012" lastFinishedPulling="2026-01-23 12:23:03.720138675 +0000 UTC m=+5648.344818534" observedRunningTime="2026-01-23 12:23:04.32599389 +0000 UTC m=+5648.950673759" watchObservedRunningTime="2026-01-23 12:23:08.477876143 +0000 UTC m=+5653.102556012" Jan 23 12:23:09 crc kubenswrapper[4689]: I0123 12:23:09.439934 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:09 crc kubenswrapper[4689]: I0123 12:23:09.502478 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:23:10 crc kubenswrapper[4689]: I0123 12:23:10.641465 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:23:10 crc kubenswrapper[4689]: E0123 12:23:10.642365 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:23:11 crc kubenswrapper[4689]: I0123 12:23:11.401708 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-drdfl" 
podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="registry-server" containerID="cri-o://6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f" gracePeriod=2 Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.049264 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.105090 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities\") pod \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.105460 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content\") pod \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.105511 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl9ck\" (UniqueName: \"kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck\") pod \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\" (UID: \"b327e3cc-def4-4e60-8c38-13f49ccf5aa9\") " Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.106543 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities" (OuterVolumeSpecName: "utilities") pod "b327e3cc-def4-4e60-8c38-13f49ccf5aa9" (UID: "b327e3cc-def4-4e60-8c38-13f49ccf5aa9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.113613 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck" (OuterVolumeSpecName: "kube-api-access-bl9ck") pod "b327e3cc-def4-4e60-8c38-13f49ccf5aa9" (UID: "b327e3cc-def4-4e60-8c38-13f49ccf5aa9"). InnerVolumeSpecName "kube-api-access-bl9ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.156915 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b327e3cc-def4-4e60-8c38-13f49ccf5aa9" (UID: "b327e3cc-def4-4e60-8c38-13f49ccf5aa9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.209107 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.209173 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl9ck\" (UniqueName: \"kubernetes.io/projected/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-kube-api-access-bl9ck\") on node \"crc\" DevicePath \"\"" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.209187 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b327e3cc-def4-4e60-8c38-13f49ccf5aa9-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.413726 4689 generic.go:334] "Generic (PLEG): container finished" podID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerID="6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f" exitCode=0 Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.413777 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerDied","Data":"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f"} Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.413808 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-drdfl" event={"ID":"b327e3cc-def4-4e60-8c38-13f49ccf5aa9","Type":"ContainerDied","Data":"03ba82daa79e9b1b79690a1bf283aff6a063508adfb0e8bc23e813d348a5224a"} Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.413828 4689 scope.go:117] "RemoveContainer" containerID="6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.414013 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-drdfl" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.448463 4689 scope.go:117] "RemoveContainer" containerID="689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.463883 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.482946 4689 scope.go:117] "RemoveContainer" containerID="afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.490510 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-drdfl"] Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.549905 4689 scope.go:117] "RemoveContainer" containerID="6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f" Jan 23 12:23:12 crc kubenswrapper[4689]: E0123 12:23:12.550406 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f\": container with ID starting with 6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f not found: ID does not exist" containerID="6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.550852 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f"} err="failed to get container status \"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f\": rpc error: code = NotFound desc = could not find container \"6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f\": container with ID starting with 6571f907ade30893ab69428d0a351724fde3916b7aae0b22adcc671d6b8ed91f not found: ID does not exist" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.550930 4689 scope.go:117] "RemoveContainer" containerID="689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee" Jan 23 12:23:12 crc kubenswrapper[4689]: E0123 12:23:12.551341 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee\": container with ID starting with 689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee not found: ID does not exist" containerID="689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.551421 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee"} err="failed to get container status \"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee\": rpc error: code = NotFound desc = could not find container \"689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee\": container with ID starting with 689115a814f41790a140d1a3ad48ce46a04ed90980baf3a20a3cc25f897966ee not found: ID does not exist" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.551454 4689 scope.go:117] "RemoveContainer" containerID="afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca" Jan 23 12:23:12 crc kubenswrapper[4689]: E0123 12:23:12.551725 4689 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca\": container with ID starting with afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca not found: ID does not exist" containerID="afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca" Jan 23 12:23:12 crc kubenswrapper[4689]: I0123 12:23:12.551778 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca"} err="failed to get container status \"afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca\": rpc error: code = NotFound desc = could not find container \"afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca\": container with ID starting with afa412e950952895a9d0949b56cdde951b080fc8f2d28891f30731bd74b904ca not found: ID does not exist" Jan 23 12:23:13 crc kubenswrapper[4689]: I0123 12:23:13.667011 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" path="/var/lib/kubelet/pods/b327e3cc-def4-4e60-8c38-13f49ccf5aa9/volumes" Jan 23 12:23:25 crc kubenswrapper[4689]: I0123 12:23:25.650507 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:23:25 crc kubenswrapper[4689]: E0123 12:23:25.651514 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:23:39 crc kubenswrapper[4689]: I0123 12:23:39.640083 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:23:39 crc kubenswrapper[4689]: E0123 12:23:39.640899 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:23:43 crc kubenswrapper[4689]: I0123 12:23:43.814923 4689 generic.go:334] "Generic (PLEG): container finished" podID="2b1985c0-6892-4629-9f40-fe58155f22df" containerID="280f6fa62adee5a95242d4bfdb303233af7761812447d940872c877f63f93b24" exitCode=0 Jan 23 12:23:43 crc kubenswrapper[4689]: I0123 12:23:43.814960 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rr6h7/must-gather-xszvc" event={"ID":"2b1985c0-6892-4629-9f40-fe58155f22df","Type":"ContainerDied","Data":"280f6fa62adee5a95242d4bfdb303233af7761812447d940872c877f63f93b24"} Jan 23 12:23:43 crc kubenswrapper[4689]: I0123 12:23:43.816076 4689 scope.go:117] "RemoveContainer" containerID="280f6fa62adee5a95242d4bfdb303233af7761812447d940872c877f63f93b24" Jan 23 12:23:44 crc kubenswrapper[4689]: I0123 12:23:44.545575 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rr6h7_must-gather-xszvc_2b1985c0-6892-4629-9f40-fe58155f22df/gather/0.log" Jan 23 12:23:53 crc kubenswrapper[4689]: 
I0123 12:23:53.376462 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rr6h7/must-gather-xszvc"] Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.377532 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-rr6h7/must-gather-xszvc" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="copy" containerID="cri-o://c00b578b8aa59bc83b6fe1fe6fae5821747f9536ef6a2b221fe0ab93504f6416" gracePeriod=2 Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.396043 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rr6h7/must-gather-xszvc"] Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.640491 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:23:53 crc kubenswrapper[4689]: E0123 12:23:53.641177 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.941375 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rr6h7_must-gather-xszvc_2b1985c0-6892-4629-9f40-fe58155f22df/copy/0.log" Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.941724 4689 generic.go:334] "Generic (PLEG): container finished" podID="2b1985c0-6892-4629-9f40-fe58155f22df" containerID="c00b578b8aa59bc83b6fe1fe6fae5821747f9536ef6a2b221fe0ab93504f6416" exitCode=143 Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.941774 4689 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e90b351fb830e68d5696ef04ef8abf536b6d983f430b011a4db1640c712e7c25" Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.966920 4689 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-rr6h7_must-gather-xszvc_2b1985c0-6892-4629-9f40-fe58155f22df/copy/0.log" Jan 23 12:23:53 crc kubenswrapper[4689]: I0123 12:23:53.967377 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.068505 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n9sp\" (UniqueName: \"kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp\") pod \"2b1985c0-6892-4629-9f40-fe58155f22df\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.068610 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output\") pod \"2b1985c0-6892-4629-9f40-fe58155f22df\" (UID: \"2b1985c0-6892-4629-9f40-fe58155f22df\") " Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.084877 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp" (OuterVolumeSpecName: "kube-api-access-9n9sp") pod "2b1985c0-6892-4629-9f40-fe58155f22df" (UID: "2b1985c0-6892-4629-9f40-fe58155f22df"). 
InnerVolumeSpecName "kube-api-access-9n9sp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.171803 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n9sp\" (UniqueName: \"kubernetes.io/projected/2b1985c0-6892-4629-9f40-fe58155f22df-kube-api-access-9n9sp\") on node \"crc\" DevicePath \"\"" Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.260013 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "2b1985c0-6892-4629-9f40-fe58155f22df" (UID: "2b1985c0-6892-4629-9f40-fe58155f22df"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.273864 4689 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/2b1985c0-6892-4629-9f40-fe58155f22df-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 23 12:23:54 crc kubenswrapper[4689]: I0123 12:23:54.950233 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rr6h7/must-gather-xszvc" Jan 23 12:23:55 crc kubenswrapper[4689]: I0123 12:23:55.653583 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" path="/var/lib/kubelet/pods/2b1985c0-6892-4629-9f40-fe58155f22df/volumes" Jan 23 12:24:04 crc kubenswrapper[4689]: I0123 12:24:04.641199 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:24:04 crc kubenswrapper[4689]: E0123 12:24:04.641942 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:24:17 crc kubenswrapper[4689]: I0123 12:24:17.641263 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:24:17 crc kubenswrapper[4689]: E0123 12:24:17.643108 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.585334 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:26 crc kubenswrapper[4689]: E0123 12:24:26.587099 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="registry-server" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587116 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="registry-server" Jan 23 12:24:26 crc kubenswrapper[4689]: E0123 12:24:26.587215 4689 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="extract-utilities" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587224 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="extract-utilities" Jan 23 12:24:26 crc kubenswrapper[4689]: E0123 12:24:26.587273 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="gather" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587280 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="gather" Jan 23 12:24:26 crc kubenswrapper[4689]: E0123 12:24:26.587300 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="copy" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587306 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="copy" Jan 23 12:24:26 crc kubenswrapper[4689]: E0123 12:24:26.587333 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="extract-content" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587339 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="extract-content" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587547 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="gather" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587569 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="b327e3cc-def4-4e60-8c38-13f49ccf5aa9" containerName="registry-server" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.587582 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b1985c0-6892-4629-9f40-fe58155f22df" containerName="copy" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.589289 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.602062 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.701538 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.701931 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkcmt\" (UniqueName: \"kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.702307 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.804655 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.804777 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.804876 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkcmt\" (UniqueName: \"kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.805128 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.805401 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.832767 4689 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pkcmt\" (UniqueName: \"kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt\") pod \"community-operators-85gh6\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:26 crc kubenswrapper[4689]: I0123 12:24:26.923939 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:27 crc kubenswrapper[4689]: I0123 12:24:27.432447 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:28 crc kubenswrapper[4689]: I0123 12:24:28.408558 4689 generic.go:334] "Generic (PLEG): container finished" podID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerID="83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a" exitCode=0 Jan 23 12:24:28 crc kubenswrapper[4689]: I0123 12:24:28.408803 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerDied","Data":"83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a"} Jan 23 12:24:28 crc kubenswrapper[4689]: I0123 12:24:28.408834 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerStarted","Data":"86549bbbc8d459b4aef320d15ff173a882f936752d9bf18872b8b1dc1fee26b0"} Jan 23 12:24:29 crc kubenswrapper[4689]: I0123 12:24:29.424014 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerStarted","Data":"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f"} Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.178101 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.182525 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.197269 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.228501 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.228663 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slz28\" (UniqueName: \"kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.228941 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.330825 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.330892 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.331178 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slz28\" (UniqueName: \"kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.331418 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.331452 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.354174 4689 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-slz28\" (UniqueName: \"kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28\") pod \"redhat-marketplace-5zbcs\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.449293 4689 generic.go:334] "Generic (PLEG): container finished" podID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerID="cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f" exitCode=0 Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.449342 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerDied","Data":"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f"} Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.552312 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:31 crc kubenswrapper[4689]: I0123 12:24:31.640586 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:24:31 crc kubenswrapper[4689]: E0123 12:24:31.640997 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:24:32 crc kubenswrapper[4689]: I0123 12:24:32.077555 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:32 crc kubenswrapper[4689]: W0123 12:24:32.248453 4689 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd90e861_674f_4459_aced_d5e087980b92.slice/crio-5d5a57187cffdd74516959c1e37aaae062357e10a8790355b9d2c6f05dea7b23 WatchSource:0}: Error finding container 5d5a57187cffdd74516959c1e37aaae062357e10a8790355b9d2c6f05dea7b23: Status 404 returned error can't find the container with id 5d5a57187cffdd74516959c1e37aaae062357e10a8790355b9d2c6f05dea7b23 Jan 23 12:24:32 crc kubenswrapper[4689]: I0123 12:24:32.465935 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerStarted","Data":"5d5a57187cffdd74516959c1e37aaae062357e10a8790355b9d2c6f05dea7b23"} Jan 23 12:24:32 crc kubenswrapper[4689]: I0123 12:24:32.706737 4689 scope.go:117] "RemoveContainer" containerID="280f6fa62adee5a95242d4bfdb303233af7761812447d940872c877f63f93b24" Jan 23 12:24:32 crc kubenswrapper[4689]: I0123 12:24:32.796510 4689 scope.go:117] "RemoveContainer" containerID="c00b578b8aa59bc83b6fe1fe6fae5821747f9536ef6a2b221fe0ab93504f6416" Jan 23 12:24:33 crc kubenswrapper[4689]: I0123 12:24:33.499706 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerStarted","Data":"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774"} Jan 23 12:24:33 crc kubenswrapper[4689]: I0123 12:24:33.502701 4689 generic.go:334] "Generic (PLEG): container 
finished" podID="fd90e861-674f-4459-aced-d5e087980b92" containerID="e36d16cffd07ea9ec722b53e2b645de105506d13d8b3063cca8f51d8e7df48db" exitCode=0 Jan 23 12:24:33 crc kubenswrapper[4689]: I0123 12:24:33.502750 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerDied","Data":"e36d16cffd07ea9ec722b53e2b645de105506d13d8b3063cca8f51d8e7df48db"} Jan 23 12:24:33 crc kubenswrapper[4689]: I0123 12:24:33.529358 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-85gh6" podStartSLOduration=3.6274611610000003 podStartE2EDuration="7.529344847s" podCreationTimestamp="2026-01-23 12:24:26 +0000 UTC" firstStartedPulling="2026-01-23 12:24:28.411227741 +0000 UTC m=+5733.035907600" lastFinishedPulling="2026-01-23 12:24:32.313111427 +0000 UTC m=+5736.937791286" observedRunningTime="2026-01-23 12:24:33.527561194 +0000 UTC m=+5738.152241053" watchObservedRunningTime="2026-01-23 12:24:33.529344847 +0000 UTC m=+5738.154024706" Jan 23 12:24:35 crc kubenswrapper[4689]: I0123 12:24:35.532446 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerStarted","Data":"82bbd45ed68616ec6c7f3e4a85be267814193ea6787abd686623e745899079d6"} Jan 23 12:24:36 crc kubenswrapper[4689]: I0123 12:24:36.547597 4689 generic.go:334] "Generic (PLEG): container finished" podID="fd90e861-674f-4459-aced-d5e087980b92" containerID="82bbd45ed68616ec6c7f3e4a85be267814193ea6787abd686623e745899079d6" exitCode=0 Jan 23 12:24:36 crc kubenswrapper[4689]: I0123 12:24:36.547745 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerDied","Data":"82bbd45ed68616ec6c7f3e4a85be267814193ea6787abd686623e745899079d6"} Jan 23 12:24:36 crc kubenswrapper[4689]: I0123 12:24:36.924571 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:36 crc kubenswrapper[4689]: I0123 12:24:36.924625 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:38 crc kubenswrapper[4689]: I0123 12:24:38.000516 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-85gh6" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="registry-server" probeResult="failure" output=< Jan 23 12:24:38 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:24:38 crc kubenswrapper[4689]: > Jan 23 12:24:38 crc kubenswrapper[4689]: I0123 12:24:38.573607 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerStarted","Data":"19a253eaaf2666e883c4fe413ec7c2720cd6a3fcc4883c619e0e144c7a3a832f"} Jan 23 12:24:38 crc kubenswrapper[4689]: I0123 12:24:38.599827 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5zbcs" podStartSLOduration=3.431019732 podStartE2EDuration="7.599808146s" podCreationTimestamp="2026-01-23 12:24:31 +0000 UTC" firstStartedPulling="2026-01-23 12:24:33.507894207 +0000 UTC m=+5738.132574066" lastFinishedPulling="2026-01-23 
12:24:37.676682601 +0000 UTC m=+5742.301362480" observedRunningTime="2026-01-23 12:24:38.59166887 +0000 UTC m=+5743.216348729" watchObservedRunningTime="2026-01-23 12:24:38.599808146 +0000 UTC m=+5743.224488005" Jan 23 12:24:41 crc kubenswrapper[4689]: I0123 12:24:41.553272 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:41 crc kubenswrapper[4689]: I0123 12:24:41.553772 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:42 crc kubenswrapper[4689]: I0123 12:24:42.599852 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-5zbcs" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="registry-server" probeResult="failure" output=< Jan 23 12:24:42 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:24:42 crc kubenswrapper[4689]: > Jan 23 12:24:44 crc kubenswrapper[4689]: I0123 12:24:44.640677 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:24:44 crc kubenswrapper[4689]: E0123 12:24:44.641487 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:24:47 crc kubenswrapper[4689]: I0123 12:24:47.003786 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:47 crc kubenswrapper[4689]: I0123 12:24:47.074656 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:47 crc kubenswrapper[4689]: I0123 12:24:47.260430 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:48 crc kubenswrapper[4689]: I0123 12:24:48.728190 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-85gh6" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="registry-server" containerID="cri-o://bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774" gracePeriod=2 Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.237744 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.337090 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkcmt\" (UniqueName: \"kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt\") pod \"3366dbbe-1cce-45c4-886b-5643ba410b5b\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.337557 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content\") pod \"3366dbbe-1cce-45c4-886b-5643ba410b5b\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.337660 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities\") pod \"3366dbbe-1cce-45c4-886b-5643ba410b5b\" (UID: \"3366dbbe-1cce-45c4-886b-5643ba410b5b\") " Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.338845 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities" (OuterVolumeSpecName: "utilities") pod "3366dbbe-1cce-45c4-886b-5643ba410b5b" (UID: "3366dbbe-1cce-45c4-886b-5643ba410b5b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.344961 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt" (OuterVolumeSpecName: "kube-api-access-pkcmt") pod "3366dbbe-1cce-45c4-886b-5643ba410b5b" (UID: "3366dbbe-1cce-45c4-886b-5643ba410b5b"). InnerVolumeSpecName "kube-api-access-pkcmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.408731 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3366dbbe-1cce-45c4-886b-5643ba410b5b" (UID: "3366dbbe-1cce-45c4-886b-5643ba410b5b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.440505 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkcmt\" (UniqueName: \"kubernetes.io/projected/3366dbbe-1cce-45c4-886b-5643ba410b5b-kube-api-access-pkcmt\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.440543 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.440552 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3366dbbe-1cce-45c4-886b-5643ba410b5b-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.740909 4689 generic.go:334] "Generic (PLEG): container finished" podID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerID="bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774" exitCode=0 Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.740984 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerDied","Data":"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774"} Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.741028 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-85gh6" event={"ID":"3366dbbe-1cce-45c4-886b-5643ba410b5b","Type":"ContainerDied","Data":"86549bbbc8d459b4aef320d15ff173a882f936752d9bf18872b8b1dc1fee26b0"} Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.741055 4689 scope.go:117] "RemoveContainer" containerID="bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.742133 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-85gh6" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.769943 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.780277 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-85gh6"] Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.787234 4689 scope.go:117] "RemoveContainer" containerID="cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.807882 4689 scope.go:117] "RemoveContainer" containerID="83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.875178 4689 scope.go:117] "RemoveContainer" containerID="bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774" Jan 23 12:24:49 crc kubenswrapper[4689]: E0123 12:24:49.875837 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774\": container with ID starting with bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774 not found: ID does not exist" containerID="bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.875874 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774"} err="failed to get container status \"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774\": rpc error: code = NotFound desc = could not find container \"bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774\": container with ID starting with bdd55bdf142cb0f5f39b9c55634956803974e1f1ec4ecf3087ee3998268aa774 not found: ID does not exist" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.875898 4689 scope.go:117] "RemoveContainer" containerID="cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f" Jan 23 12:24:49 crc kubenswrapper[4689]: E0123 12:24:49.876280 4689 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f\": container with ID starting with cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f not found: ID does not exist" containerID="cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.876392 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f"} err="failed to get container status \"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f\": rpc error: code = NotFound desc = could not find container \"cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f\": container with ID starting with cbb1118b8d231f303b3b360bc5667798318322da1e56183c043f5297b6c82f8f not found: ID does not exist" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.876480 4689 scope.go:117] "RemoveContainer" containerID="83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a" Jan 23 12:24:49 crc kubenswrapper[4689]: E0123 12:24:49.876826 4689 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a\": container with ID starting with 83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a not found: ID does not exist" containerID="83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a" Jan 23 12:24:49 crc kubenswrapper[4689]: I0123 12:24:49.876851 4689 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a"} err="failed to get container status \"83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a\": rpc error: code = NotFound desc = could not find container \"83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a\": container with ID starting with 83030aedce05fa25bd06a6d2bc8d8a15ce910bf1a94572565e70609a3dabd03a not found: ID does not exist" Jan 23 12:24:51 crc kubenswrapper[4689]: I0123 12:24:51.613139 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:51 crc kubenswrapper[4689]: I0123 12:24:51.654958 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" path="/var/lib/kubelet/pods/3366dbbe-1cce-45c4-886b-5643ba410b5b/volumes" Jan 23 12:24:51 crc kubenswrapper[4689]: I0123 12:24:51.664728 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:52 crc kubenswrapper[4689]: I0123 12:24:52.661959 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:52 crc kubenswrapper[4689]: I0123 12:24:52.782628 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5zbcs" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="registry-server" containerID="cri-o://19a253eaaf2666e883c4fe413ec7c2720cd6a3fcc4883c619e0e144c7a3a832f" gracePeriod=2 Jan 23 12:24:53 crc kubenswrapper[4689]: I0123 12:24:53.808343 4689 generic.go:334] "Generic (PLEG): container finished" podID="fd90e861-674f-4459-aced-d5e087980b92" containerID="19a253eaaf2666e883c4fe413ec7c2720cd6a3fcc4883c619e0e144c7a3a832f" exitCode=0 Jan 23 12:24:53 crc kubenswrapper[4689]: I0123 12:24:53.808421 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerDied","Data":"19a253eaaf2666e883c4fe413ec7c2720cd6a3fcc4883c619e0e144c7a3a832f"} Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.028414 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.162639 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content\") pod \"fd90e861-674f-4459-aced-d5e087980b92\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.163016 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities\") pod \"fd90e861-674f-4459-aced-d5e087980b92\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.163641 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities" (OuterVolumeSpecName: "utilities") pod "fd90e861-674f-4459-aced-d5e087980b92" (UID: "fd90e861-674f-4459-aced-d5e087980b92"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.164365 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slz28\" (UniqueName: \"kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28\") pod \"fd90e861-674f-4459-aced-d5e087980b92\" (UID: \"fd90e861-674f-4459-aced-d5e087980b92\") " Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.165337 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.169304 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28" (OuterVolumeSpecName: "kube-api-access-slz28") pod "fd90e861-674f-4459-aced-d5e087980b92" (UID: "fd90e861-674f-4459-aced-d5e087980b92"). InnerVolumeSpecName "kube-api-access-slz28". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.182996 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd90e861-674f-4459-aced-d5e087980b92" (UID: "fd90e861-674f-4459-aced-d5e087980b92"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.267538 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd90e861-674f-4459-aced-d5e087980b92-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.267577 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slz28\" (UniqueName: \"kubernetes.io/projected/fd90e861-674f-4459-aced-d5e087980b92-kube-api-access-slz28\") on node \"crc\" DevicePath \"\"" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.824599 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5zbcs" event={"ID":"fd90e861-674f-4459-aced-d5e087980b92","Type":"ContainerDied","Data":"5d5a57187cffdd74516959c1e37aaae062357e10a8790355b9d2c6f05dea7b23"} Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.824700 4689 scope.go:117] "RemoveContainer" containerID="19a253eaaf2666e883c4fe413ec7c2720cd6a3fcc4883c619e0e144c7a3a832f" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.824722 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5zbcs" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.869062 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.873178 4689 scope.go:117] "RemoveContainer" containerID="82bbd45ed68616ec6c7f3e4a85be267814193ea6787abd686623e745899079d6" Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.881420 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5zbcs"] Jan 23 12:24:54 crc kubenswrapper[4689]: I0123 12:24:54.897501 4689 scope.go:117] "RemoveContainer" containerID="e36d16cffd07ea9ec722b53e2b645de105506d13d8b3063cca8f51d8e7df48db" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.678533 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd90e861-674f-4459-aced-d5e087980b92" path="/var/lib/kubelet/pods/fd90e861-674f-4459-aced-d5e087980b92/volumes" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.679699 4689 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680063 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680075 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680111 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="extract-content" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680118 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="extract-content" Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680178 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="extract-utilities" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680185 4689 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="extract-utilities" Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680203 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680210 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680242 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="extract-content" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680248 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="extract-content" Jan 23 12:24:55 crc kubenswrapper[4689]: E0123 12:24:55.680275 4689 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="extract-utilities" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680281 4689 state_mem.go:107] "Deleted CPUSet assignment" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="extract-utilities" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680633 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd90e861-674f-4459-aced-d5e087980b92" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.680699 4689 memory_manager.go:354] "RemoveStaleState removing state" podUID="3366dbbe-1cce-45c4-886b-5643ba410b5b" containerName="registry-server" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.683482 4689 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.688096 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.819363 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.819598 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.819656 4689 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9b8l\" (UniqueName: \"kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.921226 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.921342 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.921375 4689 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9b8l\" (UniqueName: \"kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.922238 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.929259 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:55 crc kubenswrapper[4689]: I0123 12:24:55.952481 4689 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-b9b8l\" (UniqueName: \"kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l\") pod \"redhat-operators-r2s4h\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:56 crc kubenswrapper[4689]: I0123 12:24:56.024361 4689 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:24:56 crc kubenswrapper[4689]: I0123 12:24:56.582454 4689 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:24:56 crc kubenswrapper[4689]: I0123 12:24:56.884227 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerStarted","Data":"38c47e040497678efd2579cd711b358fd373e9995900b8e85f1e1b9b1d44cb58"} Jan 23 12:24:57 crc kubenswrapper[4689]: I0123 12:24:57.910103 4689 generic.go:334] "Generic (PLEG): container finished" podID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" containerID="3302a69cd0bb0f585b6afcb7c04f2ed375d6d18c760b614965c80eec7db9629e" exitCode=0 Jan 23 12:24:57 crc kubenswrapper[4689]: I0123 12:24:57.910502 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerDied","Data":"3302a69cd0bb0f585b6afcb7c04f2ed375d6d18c760b614965c80eec7db9629e"} Jan 23 12:24:58 crc kubenswrapper[4689]: I0123 12:24:58.640787 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:24:58 crc kubenswrapper[4689]: E0123 12:24:58.641490 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:24:59 crc kubenswrapper[4689]: I0123 12:24:59.932733 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerStarted","Data":"a5eaed77880856f5a81e17e1cc6218400b364f1eddaa819c5736b7dc195e27c6"} Jan 23 12:25:09 crc kubenswrapper[4689]: I0123 12:25:09.061623 4689 generic.go:334] "Generic (PLEG): container finished" podID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" containerID="a5eaed77880856f5a81e17e1cc6218400b364f1eddaa819c5736b7dc195e27c6" exitCode=0 Jan 23 12:25:09 crc kubenswrapper[4689]: I0123 12:25:09.061720 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerDied","Data":"a5eaed77880856f5a81e17e1cc6218400b364f1eddaa819c5736b7dc195e27c6"} Jan 23 12:25:11 crc kubenswrapper[4689]: I0123 12:25:11.085023 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerStarted","Data":"e8b45fe036ad55c6d48fd1cc593067f381601d19031f9fb6aa9815118f35878a"} Jan 23 12:25:11 crc kubenswrapper[4689]: I0123 12:25:11.104576 4689 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-r2s4h" podStartSLOduration=4.393774854 podStartE2EDuration="16.104559006s" podCreationTimestamp="2026-01-23 12:24:55 +0000 UTC" firstStartedPulling="2026-01-23 12:24:57.923255941 +0000 UTC m=+5762.547935800" lastFinishedPulling="2026-01-23 12:25:09.634040093 +0000 UTC m=+5774.258719952" observedRunningTime="2026-01-23 12:25:11.102480525 +0000 UTC m=+5775.727160404" watchObservedRunningTime="2026-01-23 12:25:11.104559006 +0000 UTC m=+5775.729238865" Jan 23 12:25:12 crc kubenswrapper[4689]: I0123 12:25:12.640226 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:25:12 crc kubenswrapper[4689]: E0123 12:25:12.640892 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:25:16 crc kubenswrapper[4689]: I0123 12:25:16.025343 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:16 crc kubenswrapper[4689]: I0123 12:25:16.025971 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:17 crc kubenswrapper[4689]: I0123 12:25:17.074735 4689 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-r2s4h" podUID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" containerName="registry-server" probeResult="failure" output=< Jan 23 12:25:17 crc kubenswrapper[4689]: timeout: failed to connect service ":50051" within 1s Jan 23 12:25:17 crc kubenswrapper[4689]: > Jan 23 12:25:24 crc kubenswrapper[4689]: I0123 12:25:24.641216 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:25:24 crc kubenswrapper[4689]: E0123 12:25:24.642725 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:25:26 crc kubenswrapper[4689]: I0123 12:25:26.083978 4689 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:26 crc kubenswrapper[4689]: I0123 12:25:26.154750 4689 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:26 crc kubenswrapper[4689]: I0123 12:25:26.886571 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:25:27 crc kubenswrapper[4689]: I0123 12:25:27.289001 4689 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r2s4h" podUID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" containerName="registry-server" containerID="cri-o://e8b45fe036ad55c6d48fd1cc593067f381601d19031f9fb6aa9815118f35878a" gracePeriod=2 Jan 23 
12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.303283 4689 generic.go:334] "Generic (PLEG): container finished" podID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" containerID="e8b45fe036ad55c6d48fd1cc593067f381601d19031f9fb6aa9815118f35878a" exitCode=0 Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.303341 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerDied","Data":"e8b45fe036ad55c6d48fd1cc593067f381601d19031f9fb6aa9815118f35878a"} Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.580970 4689 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.599954 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9b8l\" (UniqueName: \"kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l\") pod \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.600126 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities\") pod \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.600227 4689 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content\") pod \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\" (UID: \"dddc8bcc-5328-4d82-a808-bf8ad2b37f82\") " Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.601046 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities" (OuterVolumeSpecName: "utilities") pod "dddc8bcc-5328-4d82-a808-bf8ad2b37f82" (UID: "dddc8bcc-5328-4d82-a808-bf8ad2b37f82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.615369 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l" (OuterVolumeSpecName: "kube-api-access-b9b8l") pod "dddc8bcc-5328-4d82-a808-bf8ad2b37f82" (UID: "dddc8bcc-5328-4d82-a808-bf8ad2b37f82"). InnerVolumeSpecName "kube-api-access-b9b8l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.703844 4689 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.704120 4689 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9b8l\" (UniqueName: \"kubernetes.io/projected/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-kube-api-access-b9b8l\") on node \"crc\" DevicePath \"\"" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.729851 4689 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dddc8bcc-5328-4d82-a808-bf8ad2b37f82" (UID: "dddc8bcc-5328-4d82-a808-bf8ad2b37f82"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 12:25:28 crc kubenswrapper[4689]: I0123 12:25:28.805932 4689 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddc8bcc-5328-4d82-a808-bf8ad2b37f82-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.319006 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2s4h" event={"ID":"dddc8bcc-5328-4d82-a808-bf8ad2b37f82","Type":"ContainerDied","Data":"38c47e040497678efd2579cd711b358fd373e9995900b8e85f1e1b9b1d44cb58"} Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.319064 4689 scope.go:117] "RemoveContainer" containerID="e8b45fe036ad55c6d48fd1cc593067f381601d19031f9fb6aa9815118f35878a" Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.319275 4689 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r2s4h" Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.344733 4689 scope.go:117] "RemoveContainer" containerID="a5eaed77880856f5a81e17e1cc6218400b364f1eddaa819c5736b7dc195e27c6" Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.378781 4689 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.385107 4689 scope.go:117] "RemoveContainer" containerID="3302a69cd0bb0f585b6afcb7c04f2ed375d6d18c760b614965c80eec7db9629e" Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.394697 4689 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r2s4h"] Jan 23 12:25:29 crc kubenswrapper[4689]: I0123 12:25:29.668107 4689 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dddc8bcc-5328-4d82-a808-bf8ad2b37f82" path="/var/lib/kubelet/pods/dddc8bcc-5328-4d82-a808-bf8ad2b37f82/volumes" Jan 23 12:25:39 crc kubenswrapper[4689]: I0123 12:25:39.640531 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:25:39 crc kubenswrapper[4689]: E0123 12:25:39.641325 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:25:50 crc kubenswrapper[4689]: I0123 12:25:50.641036 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:25:50 crc kubenswrapper[4689]: E0123 12:25:50.641978 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:26:01 crc kubenswrapper[4689]: I0123 12:26:01.641558 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:26:01 crc kubenswrapper[4689]: E0123 12:26:01.643334 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:26:14 crc kubenswrapper[4689]: I0123 12:26:14.642218 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:26:14 crc kubenswrapper[4689]: E0123 12:26:14.643331 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:26:29 crc kubenswrapper[4689]: I0123 12:26:29.641145 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:26:29 crc kubenswrapper[4689]: E0123 12:26:29.642240 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:26:42 crc kubenswrapper[4689]: I0123 12:26:42.640522 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:26:42 crc kubenswrapper[4689]: E0123 12:26:42.641766 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:26:56 crc kubenswrapper[4689]: I0123 12:26:56.640843 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:26:56 crc kubenswrapper[4689]: E0123 12:26:56.642122 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:27:10 crc kubenswrapper[4689]: I0123 12:27:10.641671 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:27:10 crc kubenswrapper[4689]: E0123 12:27:10.643403 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:27:25 crc kubenswrapper[4689]: I0123 12:27:25.656706 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:27:25 crc kubenswrapper[4689]: E0123 12:27:25.657714 4689 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-sp7sf_openshift-machine-config-operator(3d8de6cc-a03d-468b-bfe9-fbf544087653)\"" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" 
podUID="3d8de6cc-a03d-468b-bfe9-fbf544087653" Jan 23 12:27:37 crc kubenswrapper[4689]: I0123 12:27:37.641852 4689 scope.go:117] "RemoveContainer" containerID="170fe29a3467237bf02916ad822bed526edd0f0aaead1d575d17970168cb19e0" Jan 23 12:27:38 crc kubenswrapper[4689]: I0123 12:27:38.134283 4689 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-sp7sf" event={"ID":"3d8de6cc-a03d-468b-bfe9-fbf544087653","Type":"ContainerStarted","Data":"0f0ec415d6055669f430a5388a8d6feb8018771985102f6310bc20c4f1362bc0"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515134664714024460 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015134664714017375 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015134650510016505 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015134650510015455 5ustar corecore